• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bitcoindevkit / bdk / 10539621204

24 Aug 2024 03:32PM UTC coverage: 82.079% (+0.2%) from 81.848%
10539621204

Pull #1569

github

web-flow
Merge 48259deeb into 9e6ac72a6
Pull Request #1569: `bdk_core` WIP WIP WIP

533 of 616 new or added lines in 16 files covered. (86.53%)

5 existing lines in 4 files now uncovered.

11230 of 13682 relevant lines covered (82.08%)

13503.08 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.42
/crates/esplora/src/async_ext.rs
1
use async_trait::async_trait;
2
use bdk_core::collections::{BTreeMap, BTreeSet, HashSet};
3
use bdk_core::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
4
use bdk_core::{
5
    bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
6
    tx_graph, BlockId, CheckPoint, ConfirmationBlockTime, Indexed,
7
};
8
use futures::{stream::FuturesOrdered, TryStreamExt};
9

10
use crate::{insert_anchor_from_status, insert_prevouts};
11

12
/// Boxed [`esplora_client::Error`].
///
/// Used as the error type of every fallible function in this module. Boxing keeps `Result`
/// values small, since the underlying client error can be large.
type Error = Box<esplora_client::Error>;
14

15
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// Refer to [crate-level documentation](crate) for more.
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Scan keychain scripts for transactions against Esplora, returning an update that can be
    /// applied to the receiving structures.
    ///
    /// `request` provides the data required to perform a script-pubkey-based full scan
    /// (see [`FullScanRequest`]). The full scan for each keychain (`K`) stops after a gap of
    /// `stop_gap` script pubkeys with no associated transactions. `parallel_requests` specifies
    /// the maximum number of HTTP requests to make in parallel.
    ///
    /// # Errors
    ///
    /// Returns a boxed [`esplora_client::Error`] if any request to the Esplora server fails.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn full_scan<K: Ord + Clone + Send, R: Into<FullScanRequest<K>> + Send>(
        &self,
        request: R,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<FullScanResult<K>, Error>;

    /// Sync a set of scripts, txids, and/or outpoints against Esplora.
    ///
    /// `request` provides the data required to perform a script-pubkey-based sync (see
    /// [`SyncRequest`]). `parallel_requests` specifies the maximum number of HTTP requests to make
    /// in parallel.
    ///
    /// # Errors
    ///
    /// Returns a boxed [`esplora_client::Error`] if any request to the Esplora server fails.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn sync<I: Send, R: Into<SyncRequest<I>> + Send>(
        &self,
        request: R,
        parallel_requests: usize,
    ) -> Result<SyncResult, Error>;
}
50

51
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
52
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
53
impl EsploraAsyncExt for esplora_client::AsyncClient {
54
    async fn full_scan<K: Ord + Clone + Send, R: Into<FullScanRequest<K>> + Send>(
55
        &self,
56
        request: R,
57
        stop_gap: usize,
58
        parallel_requests: usize,
59
    ) -> Result<FullScanResult<K>, Error> {
4✔
60
        let mut request = request.into();
4✔
61
        let keychains = request.keychains();
4✔
62

4✔
63
        let chain_tip = request.chain_tip();
4✔
64
        let latest_blocks = if chain_tip.is_some() {
4✔
65
            Some(fetch_latest_blocks(self).await?)
4✔
66
        } else {
4✔
67
            None
4✔
68
        };
4✔
69

4✔
70
        let mut graph_update = tx_graph::Update::<ConfirmationBlockTime>::default();
4✔
71
        let mut inserted_txs = HashSet::<Txid>::new();
4✔
72
        let mut last_active_indices = BTreeMap::<K, u32>::new();
4✔
73
        for keychain in keychains {
8✔
74
            let keychain_spks = request.iter_spks(keychain.clone());
4✔
75
            let (update, last_active_index) = fetch_txs_with_keychain_spks(
4✔
76
                self,
4✔
77
                &mut inserted_txs,
4✔
78
                keychain_spks,
4✔
79
                stop_gap,
4✔
80
                parallel_requests,
4✔
81
            )
4✔
82
            .await?;
60✔
83
            graph_update.extend(update);
4✔
84
            if let Some(last_active_index) = last_active_index {
4✔
85
                last_active_indices.insert(keychain, last_active_index);
3✔
86
            }
3✔
87
        }
4✔
88

4✔
89
        let chain_update = match (chain_tip, latest_blocks) {
4✔
90
            (Some(chain_tip), Some(latest_blocks)) => {
4✔
91
                Some(chain_update(self, &latest_blocks, &chain_tip, &graph_update.anchors).await?)
4✔
92
            }
4✔
93
            _ => None,
4✔
94
        };
4✔
95

4✔
96
        Ok(FullScanResult {
4✔
97
            chain_update,
4✔
98
            graph_update,
4✔
99
            last_active_indices,
4✔
100
        })
4✔
101
    }
4✔
102

103
    async fn sync<I: Send, R: Into<SyncRequest<I>> + Send>(
104
        &self,
105
        request: R,
106
        parallel_requests: usize,
107
    ) -> Result<SyncResult, Error> {
1✔
108
        let mut request = request.into();
1✔
109

1✔
110
        let chain_tip = request.chain_tip();
1✔
111
        let latest_blocks = if chain_tip.is_some() {
1✔
112
            Some(fetch_latest_blocks(self).await?)
1✔
113
        } else {
1✔
114
            None
1✔
115
        };
1✔
116

1✔
117
        let mut graph_update = tx_graph::Update::<ConfirmationBlockTime>::default();
1✔
118
        let mut inserted_txs = HashSet::<Txid>::new();
1✔
119
        graph_update.extend(
1✔
120
            fetch_txs_with_spks(
1✔
121
                self,
1✔
122
                &mut inserted_txs,
1✔
123
                request.iter_spks(),
1✔
124
                parallel_requests,
1✔
125
            )
1✔
126
            .await?,
4✔
127
        );
1✔
128
        graph_update.extend(
1✔
129
            fetch_txs_with_txids(
1✔
130
                self,
1✔
131
                &mut inserted_txs,
1✔
132
                request.iter_txids(),
1✔
133
                parallel_requests,
1✔
134
            )
1✔
135
            .await?,
1✔
136
        );
1✔
137
        graph_update.extend(
1✔
138
            fetch_txs_with_outpoints(
1✔
139
                self,
1✔
140
                &mut inserted_txs,
1✔
141
                request.iter_outpoints(),
1✔
142
                parallel_requests,
1✔
143
            )
1✔
144
            .await?,
1✔
145
        );
1✔
146

1✔
147
        let chain_update = match (chain_tip, latest_blocks) {
1✔
148
            (Some(chain_tip), Some(latest_blocks)) => {
1✔
149
                Some(chain_update(self, &latest_blocks, &chain_tip, &graph_update.anchors).await?)
1✔
150
            }
1✔
151
            _ => None,
1✔
152
        };
1✔
153

1✔
154
        Ok(SyncResult {
1✔
155
            chain_update,
1✔
156
            graph_update,
1✔
157
        })
1✔
158
    }
1✔
159
}
160

161
/// Fetch latest blocks from Esplora in an atomic call.
162
///
163
/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND
164
/// transactions atomically, and the checkpoint tip is used to determine last-scanned block (for
165
/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use
166
/// an earlier tip rather than a later tip) otherwise the caller may accidentally skip blocks when
167
/// alternating between chain-sources.
168
async fn fetch_latest_blocks(
16✔
169
    client: &esplora_client::AsyncClient,
16✔
170
) -> Result<BTreeMap<u32, BlockHash>, Error> {
16✔
171
    Ok(client
11✔
172
        .get_blocks(None)
11✔
173
        .await?
11✔
174
        .into_iter()
11✔
175
        .map(|b| (b.time.height, b.id))
110✔
176
        .collect())
11✔
177
}
11✔
178

179
/// Used instead of [`esplora_client::BlockingClient::get_block_hash`].
180
///
181
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
182
async fn fetch_block(
28✔
183
    client: &esplora_client::AsyncClient,
28✔
184
    latest_blocks: &BTreeMap<u32, BlockHash>,
28✔
185
    height: u32,
28✔
186
) -> Result<Option<BlockHash>, Error> {
28✔
187
    if let Some(&hash) = latest_blocks.get(&height) {
22✔
188
        return Ok(Some(hash));
9✔
189
    }
13✔
190

13✔
191
    // We avoid fetching blocks higher than previously fetched `latest_blocks` as the local chain
13✔
192
    // tip is used to signal for the last-synced-up-to-height.
13✔
193
    let &tip_height = latest_blocks
13✔
194
        .keys()
13✔
195
        .last()
13✔
196
        .expect("must have atleast one entry");
13✔
197
    if height > tip_height {
13✔
198
        return Ok(None);
×
199
    }
13✔
200

13✔
201
    Ok(Some(client.get_block_hash(height).await?))
13✔
202
}
22✔
203

204
/// Create the [`local_chain::Update`].
///
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
/// should not surpass `latest_blocks`.
async fn chain_update(
    client: &esplora_client::AsyncClient,
    latest_blocks: &BTreeMap<u32, BlockHash>,
    local_tip: &CheckPoint,
    anchors: &BTreeSet<(ConfirmationBlockTime, Txid)>,
) -> Result<CheckPoint, Error> {
    // Walk the local chain from the tip downwards, looking for the highest block the remote
    // agrees with. Heights where the remote disagrees are recorded (with the remote's hash).
    let mut point_of_agreement = None;
    let mut conflicts = vec![];
    for local_cp in local_tip.iter() {
        // `fetch_block` yields `None` for heights above the remote tip; skip those heights.
        let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? {
            Some(hash) => hash,
            None => continue,
        };
        if remote_hash == local_cp.hash() {
            point_of_agreement = Some(local_cp.clone());
            break;
        } else {
            // it is not strictly necessary to include all the conflicted heights (we do need the
            // first one) but it seems prudent to make sure the updated chain's heights are a
            // superset of the existing chain after update.
            conflicts.push(BlockId {
                height: local_cp.height(),
                hash: remote_hash,
            });
        }
    }

    // The genesis block is shared by definition, so as long as the remote serves the same
    // network the walk above must find an agreement point.
    let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");

    // Re-apply the conflicting heights on top of the agreement point, lowest height first
    // (`conflicts` was collected tip-downwards, hence the `rev`).
    tip = tip
        .extend(conflicts.into_iter().rev())
        .expect("evicted are in order");

    // Ensure every anchor height has a corresponding checkpoint in the update.
    for (anchor, _txid) in anchors {
        let height = anchor.block_id.height;
        if tip.get(height).is_none() {
            let hash = match fetch_block(client, latest_blocks, height).await? {
                Some(hash) => hash,
                None => continue,
            };
            tip = tip.insert(BlockId { height, hash });
        }
    }

    // insert the most recent blocks at the tip to make sure we update the tip and make the update
    // robust.
    for (&height, &hash) in latest_blocks.iter() {
        tip = tip.insert(BlockId { height, hash });
    }

    Ok(tip)
}
260

261
/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning
262
/// `keychain_spks` against Esplora.
263
///
264
/// `keychain_spks` is an *unbounded* indexed-[`ScriptBuf`] iterator that represents scripts
265
/// derived from a keychain. The scanning logic stops after a `stop_gap` number of consecutive
266
/// scripts with no transaction history is reached. `parallel_requests` specifies the maximum
267
/// number of HTTP requests to make in parallel.
268
///
269
/// A [`TxGraph`] (containing the fetched transactions and anchors) and the last active
270
/// keychain index (if any) is returned. The last active keychain index is the keychain's last
271
/// script pubkey that contains a non-empty transaction history.
272
///
273
/// Refer to [crate-level docs](crate) for more.
274
async fn fetch_txs_with_keychain_spks<I: Iterator<Item = Indexed<ScriptBuf>> + Send>(
5✔
275
    client: &esplora_client::AsyncClient,
5✔
276
    inserted_txs: &mut HashSet<Txid>,
5✔
277
    mut keychain_spks: I,
5✔
278
    stop_gap: usize,
5✔
279
    parallel_requests: usize,
5✔
280
) -> Result<(tx_graph::Update<ConfirmationBlockTime>, Option<u32>), Error> {
5✔
281
    type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
5✔
282

5✔
283
    let mut update = tx_graph::Update::<ConfirmationBlockTime>::default();
5✔
284
    let mut last_index = Option::<u32>::None;
5✔
285
    let mut last_active_index = Option::<u32>::None;
5✔
286

287
    loop {
34✔
288
        let handles = keychain_spks
34✔
289
            .by_ref()
34✔
290
            .take(parallel_requests)
34✔
291
            .map(|(spk_index, spk)| {
34✔
292
                let client = client.clone();
32✔
293
                async move {
32✔
294
                    let mut last_seen = None;
32✔
295
                    let mut spk_txs = Vec::new();
32✔
296
                    loop {
297
                        let txs = client.scripthash_txs(&spk, last_seen).await?;
32✔
298
                        let tx_count = txs.len();
32✔
299
                        last_seen = txs.last().map(|tx| tx.txid);
32✔
300
                        spk_txs.extend(txs);
32✔
301
                        if tx_count < 25 {
32✔
302
                            break Result::<_, Error>::Ok((spk_index, spk_txs));
32✔
303
                        }
×
304
                    }
305
                }
32✔
306
            })
34✔
307
            .collect::<FuturesOrdered<_>>();
34✔
308

34✔
309
        if handles.is_empty() {
34✔
310
            break;
2✔
311
        }
32✔
312

313
        for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
64✔
314
            last_index = Some(index);
32✔
315
            if !txs.is_empty() {
32✔
316
                last_active_index = Some(index);
6✔
317
            }
26✔
318
            for tx in txs {
38✔
319
                if inserted_txs.insert(tx.txid) {
6✔
320
                    update.txs.push(tx.to_tx().into());
6✔
321
                }
6✔
322
                insert_anchor_from_status(&mut update, tx.txid, tx.status);
6✔
323
                insert_prevouts(&mut update, tx.vin);
6✔
324
            }
325
        }
326

327
        let last_index = last_index.expect("Must be set since handles wasn't empty.");
32✔
328
        let gap_limit_reached = if let Some(i) = last_active_index {
32✔
329
            last_index >= i.saturating_add(stop_gap as u32)
20✔
330
        } else {
331
            last_index + 1 >= stop_gap as u32
12✔
332
        };
333
        if gap_limit_reached {
32✔
334
            break;
3✔
335
        }
29✔
336
    }
337

338
    Ok((update, last_active_index))
5✔
339
}
5✔
340

341
/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `spks`
342
/// against Esplora.
343
///
344
/// Unlike with [`EsploraAsyncExt::fetch_txs_with_keychain_spks`], `spks` must be *bounded* as
345
/// all contained scripts will be scanned. `parallel_requests` specifies the maximum number of
346
/// HTTP requests to make in parallel.
347
///
348
/// Refer to [crate-level docs](crate) for more.
349
async fn fetch_txs_with_spks<I: IntoIterator<Item = ScriptBuf> + Send>(
1✔
350
    client: &esplora_client::AsyncClient,
1✔
351
    inserted_txs: &mut HashSet<Txid>,
1✔
352
    spks: I,
1✔
353
    parallel_requests: usize,
1✔
354
) -> Result<tx_graph::Update<ConfirmationBlockTime>, Error>
1✔
355
where
1✔
356
    I::IntoIter: Send,
1✔
357
{
1✔
358
    fetch_txs_with_keychain_spks(
1✔
359
        client,
1✔
360
        inserted_txs,
1✔
361
        spks.into_iter().enumerate().map(|(i, spk)| (i as u32, spk)),
2✔
362
        usize::MAX,
1✔
363
        parallel_requests,
1✔
364
    )
1✔
365
    .await
4✔
366
    .map(|(update, _)| update)
1✔
367
}
1✔
368

369
/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `txids`
370
/// against Esplora.
371
///
372
/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel.
373
///
374
/// Refer to [crate-level docs](crate) for more.
375
async fn fetch_txs_with_txids<I: IntoIterator<Item = Txid> + Send>(
3✔
376
    client: &esplora_client::AsyncClient,
3✔
377
    inserted_txs: &mut HashSet<Txid>,
3✔
378
    txids: I,
3✔
379
    parallel_requests: usize,
3✔
380
) -> Result<tx_graph::Update<ConfirmationBlockTime>, Error>
3✔
381
where
3✔
382
    I::IntoIter: Send,
3✔
383
{
3✔
384
    let mut update = tx_graph::Update::<ConfirmationBlockTime>::default();
3✔
385
    // Only fetch for non-inserted txs.
3✔
386
    let mut txids = txids
3✔
387
        .into_iter()
3✔
388
        .filter(|txid| !inserted_txs.contains(txid))
3✔
389
        .collect::<Vec<Txid>>()
3✔
390
        .into_iter();
3✔
391
    loop {
3✔
392
        let handles = txids
3✔
393
            .by_ref()
3✔
394
            .take(parallel_requests)
3✔
395
            .map(|txid| {
3✔
396
                let client = client.clone();
×
NEW
397
                async move { client.get_tx_info(&txid).await.map(|t| (txid, t)) }
×
398
            })
3✔
399
            .collect::<FuturesOrdered<_>>();
3✔
400

3✔
401
        if handles.is_empty() {
3✔
402
            break;
3✔
403
        }
×
404

NEW
405
        for (txid, tx_info) in handles.try_collect::<Vec<_>>().await? {
×
NEW
406
            if let Some(tx_info) = tx_info {
×
NEW
407
                if inserted_txs.insert(txid) {
×
NEW
408
                    update.txs.push(tx_info.to_tx().into());
×
409
                }
×
NEW
410
                insert_anchor_from_status(&mut update, txid, tx_info.status);
×
NEW
411
                insert_prevouts(&mut update, tx_info.vin);
×
UNCOV
412
            }
×
413
        }
414
    }
415
    Ok(update)
3✔
416
}
3✔
417

418
/// Fetch transactions and [`ConfirmationBlockTime`]s that contain and spend the provided
/// `outpoints`.
///
/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel.
///
/// Refer to [crate-level docs](crate) for more.
async fn fetch_txs_with_outpoints<I: IntoIterator<Item = OutPoint> + Send>(
    client: &esplora_client::AsyncClient,
    inserted_txs: &mut HashSet<Txid>,
    outpoints: I,
    parallel_requests: usize,
) -> Result<tx_graph::Update<ConfirmationBlockTime>, Error>
where
    I::IntoIter: Send,
{
    // Collect so we can iterate the outpoints twice: once for their containing txs, once for
    // their spend statuses.
    let outpoints = outpoints.into_iter().collect::<Vec<_>>();
    let mut update = tx_graph::Update::<ConfirmationBlockTime>::default();

    // make sure txs exists in graph and tx statuses are updated
    // TODO: We should maintain a tx cache (like we do with Electrum).
    update.extend(
        fetch_txs_with_txids(
            client,
            inserted_txs,
            outpoints.iter().copied().map(|op| op.txid),
            parallel_requests,
        )
        .await?,
    );

    // get outpoint spend-statuses
    let mut outpoints = outpoints.into_iter();
    let mut missing_txs = Vec::<Txid>::with_capacity(outpoints.len());
    loop {
        // Query spend statuses for up to `parallel_requests` outpoints at a time.
        let handles = outpoints
            .by_ref()
            .take(parallel_requests)
            .map(|op| {
                let client = client.clone();
                async move { client.get_output_status(&op.txid, op.vout as _).await }
            })
            .collect::<FuturesOrdered<_>>();

        if handles.is_empty() {
            break;
        }

        // `flatten` skips outpoints for which the server returned no status at all.
        for op_status in handles.try_collect::<Vec<_>>().await?.into_iter().flatten() {
            // An absent spending `txid` means the outpoint is unspent; nothing more to do.
            let spend_txid = match op_status.txid {
                Some(txid) => txid,
                None => continue,
            };
            if !inserted_txs.contains(&spend_txid) {
                missing_txs.push(spend_txid);
            }
            // Anchor the spending tx when the server reports a confirmation status for it.
            if let Some(spend_status) = op_status.status {
                insert_anchor_from_status(&mut update, spend_txid, spend_status);
            }
        }
    }

    // Fetch the full transactions for any spending txids not yet present in the update.
    update
        .extend(fetch_txs_with_txids(client, inserted_txs, missing_txs, parallel_requests).await?);
    Ok(update)
}
483

484
#[cfg(test)]
mod test {
    use std::{collections::BTreeSet, time::Duration};

    use bdk_chain::{
        bitcoin::{hashes::Hash, Txid},
        local_chain::LocalChain,
        BlockId,
    };
    use bdk_core::ConfirmationBlockTime;
    use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
    use esplora_client::Builder;

    use crate::async_ext::{chain_update, fetch_latest_blocks};

    // Shorthand: hash a string literal into a hash type (inferred from context, e.g. `Txid`).
    macro_rules! h {
        ($index:literal) => {{
            bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
        }};
    }

    /// Ensure that update does not remove heights (from original), and all anchor heights are included.
    #[tokio::test]
    pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
        // Declarative description of one `chain_update` scenario run against a live test env.
        struct TestCase<'a> {
            name: &'a str,
            /// Initial blockchain height to start the env with.
            initial_env_height: u32,
            /// Initial checkpoint heights to start with.
            initial_cps: &'a [u32],
            /// The final blockchain height of the env.
            final_env_height: u32,
            /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
            /// the blockhash from the env.
            anchors: &'a [(u32, Txid)],
        }

        let test_cases = [
            TestCase {
                name: "chain_extends",
                initial_env_height: 60,
                initial_cps: &[59, 60],
                final_env_height: 90,
                anchors: &[],
            },
            TestCase {
                name: "introduce_older_heights",
                initial_env_height: 50,
                initial_cps: &[10, 15],
                final_env_height: 50,
                anchors: &[(11, h!("A")), (14, h!("B"))],
            },
            TestCase {
                name: "introduce_older_heights_after_chain_extends",
                initial_env_height: 50,
                initial_cps: &[10, 15],
                final_env_height: 100,
                anchors: &[(11, h!("A")), (14, h!("B"))],
            },
        ];

        for (i, t) in test_cases.into_iter().enumerate() {
            println!("[{}] running test case: {}", i, t.name);

            let env = TestEnv::new()?;
            let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
            let client = Builder::new(base_url.as_str()).build_async()?;

            // set env to `initial_env_height`
            if let Some(to_mine) = t
                .initial_env_height
                .checked_sub(env.make_checkpoint_tip().height())
            {
                env.mine_blocks(to_mine as _, None)?;
            }
            // Poll until the esplora instance has indexed up to the expected height.
            while client.get_height().await? < t.initial_env_height {
                std::thread::sleep(Duration::from_millis(10));
            }

            // craft initial `local_chain`
            let local_chain = {
                let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
                // force `chain_update_blocking` to add all checkpoints in `t.initial_cps`
                let anchors = t
                    .initial_cps
                    .iter()
                    .map(|&height| -> anyhow::Result<_> {
                        Ok((
                            ConfirmationBlockTime {
                                block_id: BlockId {
                                    height,
                                    hash: env.bitcoind.client.get_block_hash(height as _)?,
                                },
                                confirmation_time: height as _,
                            },
                            Txid::all_zeros(),
                        ))
                    })
                    .collect::<anyhow::Result<BTreeSet<_>>>()?;
                let update = chain_update(
                    &client,
                    &fetch_latest_blocks(&client).await?,
                    &chain.tip(),
                    &anchors,
                )
                .await?;
                chain.apply_update(update)?;
                chain
            };
            println!("local chain height: {}", local_chain.tip().height());

            // extend env chain
            if let Some(to_mine) = t
                .final_env_height
                .checked_sub(env.make_checkpoint_tip().height())
            {
                env.mine_blocks(to_mine as _, None)?;
            }
            // Poll until esplora catches up to the extended chain height.
            while client.get_height().await? < t.final_env_height {
                std::thread::sleep(Duration::from_millis(10));
            }

            // craft update
            let update = {
                let anchors = t
                    .anchors
                    .iter()
                    .map(|&(height, txid)| -> anyhow::Result<_> {
                        Ok((
                            ConfirmationBlockTime {
                                block_id: BlockId {
                                    height,
                                    hash: env.bitcoind.client.get_block_hash(height as _)?,
                                },
                                confirmation_time: height as _,
                            },
                            txid,
                        ))
                    })
                    .collect::<anyhow::Result<_>>()?;
                chain_update(
                    &client,
                    &fetch_latest_blocks(&client).await?,
                    &local_chain.tip(),
                    &anchors,
                )
                .await?
            };

            // apply update
            let mut updated_local_chain = local_chain.clone();
            updated_local_chain.apply_update(update)?;
            println!(
                "updated local chain height: {}",
                updated_local_chain.tip().height()
            );

            // The update must never drop heights the original chain already had.
            assert!(
                {
                    let initial_heights = local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    let updated_heights = updated_local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    updated_heights.is_superset(&initial_heights)
                },
                "heights from the initial chain must all be in the updated chain",
            );

            // Every anchor height (and every initial checkpoint height) must be represented.
            assert!(
                {
                    let exp_anchor_heights = t
                        .anchors
                        .iter()
                        .map(|(h, _)| *h)
                        .chain(t.initial_cps.iter().copied())
                        .collect::<BTreeSet<_>>();
                    let anchor_heights = updated_local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    anchor_heights.is_superset(&exp_anchor_heights)
                },
                "anchor heights must all be in updated chain",
            );
        }

        Ok(())
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc