tari-project / tari, build 19144348346 (push via github)
06 Nov 2025 05:30PM UTC, coverage: 59.471% (+0.01%) from 59.461%
SWvheerden: chore: new release v5.2.0-pre.2
68049 of 114423 relevant lines covered (59.47%), 301232.6 hits per line

Source File: /base_layer/core/src/base_node/rpc/query_service.rs (file coverage: 18.66%)
// Copyright 2025 The Tari Project
// SPDX-License-Identifier: BSD-3-Clause

use std::cmp;

use log::trace;
use serde_valid::{validation, Validate};
use tari_common_types::{
    types,
    types::{FixedHash, FixedHashSizeError},
};
use tari_transaction_components::{
    rpc::{
        models,
        models::{
            BlockUtxoInfo,
            GenerateKernelMerkleProofResponse,
            GetUtxosByBlockRequest,
            GetUtxosByBlockResponse,
            MinimalUtxoSyncInfo,
            SyncUtxosByBlockRequest,
            SyncUtxosByBlockResponse,
            TipInfoResponse,
            TxLocation,
            TxQueryResponse,
        },
    },
    transaction_components::TransactionOutput,
};
use tari_utilities::{hex::Hex, ByteArray, ByteArrayError};
use thiserror::Error;

use crate::{
    base_node::{rpc::BaseNodeWalletQueryService, state_machine_service::states::StateInfo, StateMachineHandle},
    chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError},
    mempool::{service::MempoolHandle, MempoolServiceError, TxStorageResponse},
};

const LOG_TARGET: &str = "c::bn::rpc::query_service";
const SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT: u64 = 1000;
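// Editorial note: this constant is only used in `fetch_utxos` below, where the spend-state end header is
// taken `SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT` blocks below the current tip (presumably so that spends too
// close to the tip, which could still be reorged out, are not reported as spent).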

#[derive(Debug, Error)]
pub enum Error {
    #[error("Failed to get chain metadata: {0}")]
    FailedToGetChainMetadata(#[from] ChainStorageError),
    #[error("Header not found at height: {height}")]
    HeaderNotFound { height: u64 },
    #[error("Signature conversion error: {0}")]
    SignatureConversion(ByteArrayError),
    #[error("Mempool service error: {0}")]
    MempoolService(#[from] MempoolServiceError),
    #[error("Serde validation error: {0}")]
    SerdeValidation(#[from] validation::Errors),
    #[error("Hash conversion error: {0}")]
    HashConversion(#[from] FixedHashSizeError),
    #[error("Start header hash not found")]
    StartHeaderHashNotFound,
    #[error("End header hash not found")]
    EndHeaderHashNotFound,
    #[error("Header hash not found")]
    HeaderHashNotFound,
    #[error("Start header height {start_height} cannot be greater than the end header height {end_height}")]
    HeaderHeightMismatch { start_height: u64, end_height: u64 },
    #[error("A general error occurred: {0}")]
    General(anyhow::Error),
}

impl Error {
    fn general(err: impl Into<anyhow::Error>) -> Self {
        Error::General(err.into())
    }
}

pub struct Service<B> {
    db: AsyncBlockchainDb<B>,
    state_machine: StateMachineHandle,
    mempool: MempoolHandle,
}

impl<B: BlockchainBackend + 'static> Service<B> {
    pub fn new(db: AsyncBlockchainDb<B>, state_machine: StateMachineHandle, mempool: MempoolHandle) -> Self {
        Self {
            db,
            state_machine,
            mempool,
        }
    }

    fn state_machine(&self) -> StateMachineHandle {
        self.state_machine.clone()
    }

    fn db(&self) -> &AsyncBlockchainDb<B> {
        &self.db
    }

    fn mempool(&self) -> MempoolHandle {
        self.mempool.clone()
    }

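    /// Editorial note: looks up a kernel by its excess signature; if it was mined, the containing block's
    /// hash, height and timestamp are returned, otherwise the mempool is queried for the transaction's state.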
    async fn fetch_kernel(&self, signature: types::CompressedSignature) -> Result<TxQueryResponse, Error> {
        let db = self.db();

        match db.fetch_kernel_by_excess_sig(signature.clone()).await? {
            None => (),
            Some((_, block_hash)) => match db.fetch_header_by_block_hash(block_hash).await? {
                None => (),
                Some(header) => {
                    let response = TxQueryResponse {
                        location: TxLocation::Mined,
                        mined_header_hash: Some(block_hash.to_vec()),
                        mined_height: Some(header.height),
                        mined_timestamp: Some(header.timestamp.as_u64()),
                    };
                    return Ok(response);
                },
            },
        };

        // If not in a block then check the mempool
        let mut mempool = self.mempool();
        let mempool_response = match mempool.get_tx_state_by_excess_sig(signature.clone()).await? {
            TxStorageResponse::UnconfirmedPool => TxQueryResponse {
                location: TxLocation::InMempool,
                mined_header_hash: None,
                mined_height: None,
                mined_timestamp: None,
            },
            TxStorageResponse::ReorgPool |
            TxStorageResponse::NotStoredOrphan |
            TxStorageResponse::NotStoredTimeLocked |
            TxStorageResponse::NotStoredAlreadySpent |
            TxStorageResponse::NotStoredConsensus |
            TxStorageResponse::NotStored |
            TxStorageResponse::NotStoredFeeTooLow |
            TxStorageResponse::NotStoredAlreadyMined => TxQueryResponse {
                location: TxLocation::NotStored,
                mined_timestamp: None,
                mined_height: None,
                mined_header_hash: None,
            },
        };

        Ok(mempool_response)
    }

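    /// Editorial note: returns every output mined in the block identified by `header_hash`, together with the
    /// block's height, hash and mined timestamp.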
    async fn fetch_utxos_by_block(&self, request: GetUtxosByBlockRequest) -> Result<GetUtxosByBlockResponse, Error> {
        request.validate()?;

        let hash = request.header_hash.clone().try_into()?;

        let header = self
            .db()
            .fetch_header_by_block_hash(hash)
            .await?
            .ok_or_else(|| Error::HeaderHashNotFound)?;

        // fetch utxos
        let outputs_with_statuses = self.db.fetch_outputs_in_block_with_spend_state(hash, None).await?;

        let outputs = outputs_with_statuses
            .into_iter()
            .map(|(output, _spent)| output)
            .collect::<Vec<TransactionOutput>>();

        // if it's empty, we still need to send an empty vec of outputs.
        let utxo_block_response = GetUtxosByBlockResponse {
            outputs,
            height: header.height,
            header_hash: hash.to_vec(),
            mined_timestamp: header.timestamp.as_u64(),
        };

        Ok(utxo_block_response)
    }

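    /// Editorial note: streams UTXO and spent-output info block by block, starting at `start_header_hash`
    /// (offset by `page * limit` blocks). At most `limit` blocks are returned per call, and the batch ends
    /// at the next 100-block boundary or at the chain tip, whichever comes first.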
    #[allow(clippy::too_many_lines)]
    async fn fetch_utxos(&self, request: SyncUtxosByBlockRequest) -> Result<SyncUtxosByBlockResponse, Error> {
        // validate and fetch inputs
        request.validate()?;

        let hash = request.start_header_hash.clone().try_into()?;
        let start_header = self
            .db()
            .fetch_header_by_block_hash(hash)
            .await?
            .ok_or_else(|| Error::StartHeaderHashNotFound)?;

        let tip_header = self.db.fetch_tip_header().await?;
        // Wallets may only ask for a maximum of 100 blocks at a time. To keep the queries cacheable, they are
        // batched in groups of 100 and snapped to the nearest 100-block height boundary, so that all wallets'
        // requests line up with the same cache entries.
        let increase = ((start_header.height + 100) / 100) * 100;
        let end_height = cmp::min(tip_header.header().height, increase);
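        // Editorial note, for illustration: a start height of 250 gives ((250 + 100) / 100) * 100 = 300, and a
        // start height of 300 gives 400, i.e. `increase` is the next multiple of 100 strictly above the start
        // height; `end_height` is then capped at the tip.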

        // pagination
        let start_header_height = start_header.height + (request.page * request.limit);
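        // Editorial note, for illustration: with page = 1 and limit = 4 the effective start height moves 4
        // blocks past the requested header; if that lands beyond the tip the request is rejected below.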
        if start_header_height > tip_header.header().height {
            return Err(Error::HeaderHeightMismatch {
                start_height: start_header.height,
                end_height: tip_header.header().height,
            });
        }
        let start_header = self
            .db
            .fetch_header(start_header_height)
            .await?
            .ok_or_else(|| Error::HeaderNotFound {
                height: start_header_height,
            })?;
        // fetch utxos
        let mut utxos = vec![];
        let mut current_header = start_header;
        let mut fetched_utxos = 0;
        let spending_end_header_hash = self
            .db
            .fetch_header(
                tip_header
                    .header()
                    .height
                    .saturating_sub(SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT),
            )
            .await?
            .ok_or_else(|| Error::HeaderNotFound {
                height: tip_header
                    .header()
                    .height
                    .saturating_sub(SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT),
            })?
            .hash();
        let next_header_to_request;
        let mut has_next_page = false;
        loop {
            let current_header_hash = current_header.hash();
            trace!(
                target: LOG_TARGET,
                "current header = {} ({})",
                current_header.height,
                current_header_hash.to_hex()
            );
            let outputs = if request.exclude_spent {
                self.db
                    .fetch_outputs_in_block_with_spend_state(current_header_hash, Some(spending_end_header_hash))
                    .await?
                    .into_iter()
                    .filter(|(_, spent)| !spent)
                    .map(|(output, _spent)| output)
                    .collect::<Vec<TransactionOutput>>()
            } else {
                self.db
                    .fetch_outputs_in_block_with_spend_state(current_header_hash, None)
                    .await?
                    .into_iter()
                    .map(|(output, _spent)| output)
                    .collect::<Vec<TransactionOutput>>()
            };
            let mut inputs = self
                .db
                .fetch_inputs_in_block(current_header_hash)
                .await?
                .into_iter()
                .map(|input| input.output_hash())
                .collect::<Vec<FixedHash>>();
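            // Editorial note: outputs and spent-output hashes are packed into `BlockUtxoInfo` messages in
            // chunks of at most 2000 entries each, so a single response item stays bounded no matter how
            // large the block is; leftover inputs are sent in output-free messages below.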
            for output_chunk in outputs.chunks(2000) {
                let inputs_to_send = if inputs.is_empty() {
                    Vec::new()
                } else {
                    let num_to_drain = inputs.len().min(2000);
                    inputs.drain(..num_to_drain).map(|h| h.to_vec()).collect()
                };

                let output_block_response = BlockUtxoInfo {
                    outputs: output_chunk
                        .iter()
                        .map(|output| MinimalUtxoSyncInfo {
                            output_hash: output.hash().to_vec(),
                            commitment: output.commitment().to_vec(),
                            encrypted_data: output.encrypted_data().as_bytes().to_vec(),
                            sender_offset_public_key: output.sender_offset_public_key.to_vec(),
                        })
                        .collect(),
                    inputs: inputs_to_send,
                    height: current_header.height,
                    header_hash: current_header_hash.to_vec(),
                    mined_timestamp: current_header.timestamp.as_u64(),
                };
                utxos.push(output_block_response);
            }
            // We might still have inputs left to send if there are more of them than outputs
            for input_chunk in inputs.chunks(2000) {
                let output_block_response = BlockUtxoInfo {
                    outputs: Vec::new(),
                    inputs: input_chunk.iter().map(|h| h.to_vec()).collect::<Vec<_>>().to_vec(),
                    height: current_header.height,
                    header_hash: current_header_hash.to_vec(),
                    mined_timestamp: current_header.timestamp.as_u64(),
                };
                utxos.push(output_block_response);
            }

            fetched_utxos += 1;

            if current_header.height >= tip_header.header().height {
                next_header_to_request = vec![];
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
                break;
            }
            if fetched_utxos >= request.limit {
                next_header_to_request = current_header_hash.to_vec();
                // This is a special edge case: our request has reached the page limit, but we are not done with
                // the block, and we don't want to split a block over two requests. So we remove the partial
                // block we added so that it can be requested in full in the next request. We also don't want to
                // get stuck in a loop where a block can never fit into the page limit, so if that block is the
                // same as the first one, we just send it as is, partially. Otherwise we remove it and let it be
                // sent in the next request.
                if utxos.first().ok_or(Error::General(anyhow::anyhow!("No utxos found")))? // should never happen as we always add at least one block
                    .header_hash ==
                    current_header_hash.to_vec()
                {
                    // special edge case where the first block is also the last block we can send, so we just send it as
                    // is, partially
                    break;
                }
                while !utxos.is_empty() &&
                    utxos.last().ok_or(Error::General(anyhow::anyhow!("No utxos found")))? // should never happen as we always add at least one block
                    .header_hash ==
                        current_header_hash.to_vec()
                {
                    utxos.pop();
                }
                break;
            }

            current_header =
                self.db
                    .fetch_header(current_header.height + 1)
                    .await?
                    .ok_or_else(|| Error::HeaderNotFound {
                        height: current_header.height + 1,
                    })?;
            if current_header.height == end_height {
                next_header_to_request = current_header.hash().to_vec();
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
                break; // Stop if we reach the end height
            }
        }

        Ok(SyncUtxosByBlockResponse {
            blocks: utxos,
            has_next_page,
            next_header_to_scan: next_header_to_request,
        })
    }
}

#[async_trait::async_trait]
impl<B: BlockchainBackend + 'static> BaseNodeWalletQueryService for Service<B> {
    type Error = Error;

    async fn get_tip_info(&self) -> Result<TipInfoResponse, Self::Error> {
        let state_machine = self.state_machine();
        let status_watch = state_machine.get_status_info_watch();
        let is_synced = match status_watch.borrow().state_info {
            StateInfo::Listening(li) => li.is_synced(),
            _ => false,
        };

        let metadata = self.db.get_chain_metadata().await?;

        Ok(TipInfoResponse {
            metadata: Some(metadata),
            is_synced,
        })
    }

    async fn get_header_by_height(&self, height: u64) -> Result<models::BlockHeader, Self::Error> {
        let result = self
            .db
            .fetch_header(height)
            .await?
            .ok_or(Error::HeaderNotFound { height })?
            .into();
        Ok(result)
    }

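    /// Editorial note: binary search over header heights for, roughly, the height of the last block whose
    /// timestamp is at or before `epoch_time`; returns 0 if the requested time predates the chain.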
    async fn get_height_at_time(&self, epoch_time: u64) -> Result<u64, Self::Error> {
        trace!(target: LOG_TARGET, "requested_epoch_time: {}", epoch_time);
        let tip_header = self.db.fetch_tip_header().await?;

        let mut left_height = 0u64;
        let mut right_height = tip_header.height();

        while left_height <= right_height {
            let mut mid_height = (left_height + right_height) / 2;

            if mid_height == 0 {
                return Ok(0u64);
            }
            // If the two bounds are adjacent then perform the test between the right and left sides
            if left_height == mid_height {
                mid_height = right_height;
            }

            let mid_header = self
                .db
                .fetch_header(mid_height)
                .await?
                .ok_or_else(|| Error::HeaderNotFound { height: mid_height })?;
            let before_mid_header = self
                .db
                .fetch_header(mid_height - 1)
                .await?
                .ok_or_else(|| Error::HeaderNotFound { height: mid_height - 1 })?;
            trace!(
                target: LOG_TARGET,
                "requested_epoch_time: {}, left: {}, mid: {}/{} ({}/{}), right: {}",
                epoch_time,
                left_height,
                mid_height,
                mid_height - 1,
                mid_header.timestamp.as_u64(),
                before_mid_header.timestamp.as_u64(),
                right_height
            );
            if epoch_time < mid_header.timestamp.as_u64() && epoch_time >= before_mid_header.timestamp.as_u64() {
                trace!(
                    target: LOG_TARGET,
                    "requested_epoch_time: {}, selected height: {}",
                    epoch_time, before_mid_header.height
                );
                return Ok(before_mid_header.height);
            } else if mid_height == right_height {
                trace!(
                    target: LOG_TARGET,
                    "requested_epoch_time: {epoch_time}, selected height: {right_height}"
                );
                return Ok(right_height);
            } else if epoch_time <= mid_header.timestamp.as_u64() {
                right_height = mid_height;
            } else {
                left_height = mid_height;
            }
        }

        Ok(0u64)
    }

    async fn transaction_query(
        &self,
        signature: crate::base_node::rpc::models::Signature,
    ) -> Result<TxQueryResponse, Self::Error> {
        let signature = signature.try_into().map_err(Error::SignatureConversion)?;

        let response = self.fetch_kernel(signature).await?;

        Ok(response)
    }

    async fn sync_utxos_by_block(
        &self,
        request: SyncUtxosByBlockRequest,
    ) -> Result<SyncUtxosByBlockResponse, Self::Error> {
        self.fetch_utxos(request).await
    }

    async fn get_utxos_by_block(
        &self,
        request: GetUtxosByBlockRequest,
    ) -> Result<GetUtxosByBlockResponse, Self::Error> {
        self.fetch_utxos_by_block(request).await
    }

    async fn get_utxos_mined_info(
        &self,
        request: models::GetUtxosMinedInfoRequest,
    ) -> Result<models::GetUtxosMinedInfoResponse, Self::Error> {
        request.validate()?;

        let mut utxos = vec![];

        let tip_header = self.db().fetch_tip_header().await?;
        for hash in request.hashes {
            let hash = hash.try_into()?;
            let output = self.db().fetch_output(hash).await?;
            if let Some(output) = output {
                utxos.push(models::MinedUtxoInfo {
                    utxo_hash: hash.to_vec(),
                    mined_in_hash: output.header_hash.to_vec(),
                    mined_in_height: output.mined_height,
                    mined_in_timestamp: output.mined_timestamp,
                });
            }
        }

        Ok(models::GetUtxosMinedInfoResponse {
            utxos,
            best_block_hash: tip_header.hash().to_vec(),
            best_block_height: tip_header.height(),
        })
    }

    async fn get_utxos_deleted_info(
        &self,
        request: models::GetUtxosDeletedInfoRequest,
    ) -> Result<models::GetUtxosDeletedInfoResponse, Self::Error> {
        request.validate()?;

        let mut utxos = vec![];

        let must_include_header = request.must_include_header.clone().try_into()?;
        if self
            .db()
            .fetch_header_by_block_hash(must_include_header)
            .await?
            .is_none()
        {
            return Err(Error::HeaderHashNotFound);
        }

        let tip_header = self.db().fetch_tip_header().await?;
        for hash in request.hashes {
            let hash = hash.try_into()?;
            let output = self.db().fetch_output(hash).await?;

            if let Some(output) = output {
                // is it still unspent?
                let input = self.db().fetch_input(hash).await?;
                if let Some(i) = input {
                    utxos.push(models::DeletedUtxoInfo {
                        utxo_hash: hash.to_vec(),
                        found_in_header: Some((output.mined_height, output.header_hash.to_vec())),
                        spent_in_header: Some((i.spent_height, i.header_hash.to_vec())),
                    });
                } else {
                    utxos.push(models::DeletedUtxoInfo {
                        utxo_hash: hash.to_vec(),
                        found_in_header: Some((output.mined_height, output.header_hash.to_vec())),
                        spent_in_header: None,
                    });
                }
            } else {
                utxos.push(models::DeletedUtxoInfo {
                    utxo_hash: hash.to_vec(),
                    found_in_header: None,
                    spent_in_header: None,
                });
            }
        }

        Ok(models::GetUtxosDeletedInfoResponse {
            utxos,
            best_block_hash: tip_header.hash().to_vec(),
            best_block_height: tip_header.height(),
        })
    }

    async fn generate_kernel_merkle_proof(
        &self,
        excess_sig: types::CompressedSignature,
    ) -> Result<GenerateKernelMerkleProofResponse, Self::Error> {
        let proof = self.db().generate_kernel_merkle_proof(excess_sig).await?;
        Ok(GenerateKernelMerkleProofResponse {
            encoded_merkle_proof: bincode::serialize(&proof.merkle_proof).map_err(Error::general)?,
            block_hash: proof.block_hash,
            leaf_index: proof.leaf_index.value() as u64,
        })
    }
}

#[cfg(test)]
mod tests {
    use tari_common::configuration::Network;
    use tari_shutdown::Shutdown;

    use super::*;
    use crate::test_helpers::blockchain::create_new_blockchain_with_network;

    fn make_state_machine_handle() -> StateMachineHandle {
        use tokio::sync::{broadcast, watch};
        let (state_tx, _state_rx) = broadcast::channel(10);
        let (_status_tx, status_rx) =
            watch::channel(crate::base_node::state_machine_service::states::StatusInfo::new());
        let shutdown = Shutdown::new();
        StateMachineHandle::new(state_tx, status_rx, shutdown.to_signal())
    }

    fn make_mempool_handle() -> MempoolHandle {
        use crate::mempool::test_utils::mock::create_mempool_service_mock;
        let (handle, _state) = create_mempool_service_mock();
        handle
    }

    async fn make_service() -> Service<crate::test_helpers::blockchain::TempDatabase> {
        let db = create_new_blockchain_with_network(Network::default());
        let adb = AsyncBlockchainDb::from(db);
        let state_machine = make_state_machine_handle();
        let mempool = make_mempool_handle();
        Service::new(adb, state_machine, mempool)
    }

    #[tokio::test]
    async fn fetch_utxos_start_header_not_found() {
        let service = make_service().await;
        let req = SyncUtxosByBlockRequest {
            start_header_hash: vec![0xAB; 32],
            limit: 4,
            page: 0,
            exclude_spent: false,
        };
        let err = service.fetch_utxos(req).await.unwrap_err();
        match err {
            Error::StartHeaderHashNotFound => {},
            other => panic!("unexpected error: {other:?}"),
        }
    }

    #[tokio::test]
    async fn fetch_utxos_header_height_mismatch() {
        let service = make_service().await;
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
        // page * limit moves start height beyond tip (0)
        let req = SyncUtxosByBlockRequest {
            start_header_hash: genesis.hash().to_vec(),
            limit: 1,
            page: 1,
            exclude_spent: false,
        };
        let err = service.fetch_utxos(req).await.unwrap_err();
        match err {
            Error::HeaderHeightMismatch { .. } => {},
            other => panic!("unexpected error: {other:?}"),
        }
    }
}