• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tari-project / tari / 23184374285

17 Mar 2026 08:04AM UTC coverage: 60.967% (-0.8%) from 61.722%
23184374285

push

github

SWvheerden
chore: new release v5.3.0-pre.3

69750 of 114406 relevant lines covered (60.97%)

227024.12 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

20.87
/base_layer/core/src/base_node/rpc/query_service.rs
1
// Copyright 2025 The Tari Project
2
// SPDX-License-Identifier: BSD-3-Clause
3

4
use std::cmp;
5

6
use log::trace;
7
use serde_valid::{Validate, validation};
8
use tari_common_types::{
9
    types,
10
    types::{FixedHash, FixedHashSizeError},
11
};
12
use tari_transaction_components::{
13
    rpc::{
14
        models,
15
        models::{
16
            BlockUtxoInfo,
17
            GenerateKernelMerkleProofResponse,
18
            GetUtxosByBlockRequest,
19
            GetUtxosByBlockResponse,
20
            MinimalUtxoSyncInfo,
21
            SyncUtxosByBlockRequest,
22
            SyncUtxosByBlockResponseV0,
23
            SyncUtxosByBlockResponseV1,
24
            TipInfoResponse,
25
            TxLocation,
26
            TxQueryResponse,
27
        },
28
    },
29
    transaction_components::TransactionOutput,
30
};
31
use tari_utilities::{ByteArray, ByteArrayError, hex::Hex};
32
use thiserror::Error;
33

34
use crate::{
35
    base_node::{StateMachineHandle, rpc::BaseNodeWalletQueryService, state_machine_service::states::StateInfo},
36
    chain_storage::{BlockchainBackend, ChainStorageError, async_db::AsyncBlockchainDb},
37
    mempool::{MempoolServiceError, TxStorageResponse, service::MempoolHandle},
38
};
39

40
const LOG_TARGET: &str = "c::bn::rpc::query_service";
41
const SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT: u64 = 1000;
42
const WALLET_MAX_BLOCKS_PER_REQUEST: u64 = 100;
43
const MAX_UTXO_CHUNK_SIZE: usize = 2000;
44

45
/// Errors returned by the base node wallet query service.
#[derive(Debug, Error)]
pub enum Error {
    /// Blockchain storage access failed (metadata, headers, outputs, ...).
    #[error("Failed to get chain metadata: {0}")]
    FailedToGetChainMetadata(#[from] ChainStorageError),
    /// No header exists at the requested height.
    #[error("Header not found at height: {height}")]
    HeaderNotFound { height: u64 },
    /// A caller-supplied signature could not be converted to the internal type.
    #[error("Signature conversion error: {0}")]
    SignatureConversion(ByteArrayError),
    /// The mempool service returned an error.
    #[error("Mempool service error: {0}")]
    MempoolService(#[from] MempoolServiceError),
    /// A request failed `serde_valid` validation.
    #[error("Serde validation error: {0}")]
    SerdeValidation(#[from] validation::Errors),
    /// A caller-supplied byte vector was not a valid fixed-size hash.
    #[error("Hash conversion error: {0}")]
    HashConversion(#[from] FixedHashSizeError),
    /// The start header hash of a sync request is unknown to this node.
    #[error("Start header hash not found")]
    StartHeaderHashNotFound,
    /// The end header hash of a sync request is unknown to this node.
    #[error("End header hash not found")]
    EndHeaderHashNotFound,
    /// A referenced header hash is unknown to this node.
    #[error("Header hash not found")]
    HeaderHashNotFound,
    /// The requested start height lies beyond the permissible end height.
    #[error("Start header height {start_height} cannot be greater than the end header height {end_height}")]
    HeaderHeightMismatch { start_height: u64, end_height: u64 },
    /// The requested output could not be found.
    #[error("Output not found")]
    OutputNotFound,
    /// Catch-all for errors that have no dedicated variant.
    #[error("A general error occurred: {0}")]
    General(anyhow::Error),
}
72

73
impl Error {
74
    fn general(err: impl Into<anyhow::Error>) -> Self {
×
75
        Error::General(err.into())
×
76
    }
×
77
}
78

79
/// Base node RPC service answering wallet queries (tip info, kernel/UTXO lookups,
/// paginated UTXO-by-block sync) against the local blockchain database and mempool.
pub struct Service<B> {
    /// Async handle to the blockchain database backend.
    db: AsyncBlockchainDb<B>,
    /// Handle to the base node state machine; used to read the current sync status.
    state_machine: StateMachineHandle,
    /// Handle to the mempool service, for transaction-state queries.
    mempool: MempoolHandle,
    /// Maximum number of outputs/inputs packed into a single sync response chunk.
    max_utxo_chunk_size: usize,
}
85

86
impl<B: BlockchainBackend + 'static> Service<B> {
87
    pub fn new(db: AsyncBlockchainDb<B>, state_machine: StateMachineHandle, mempool: MempoolHandle) -> Self {
2✔
88
        Self {
2✔
89
            db,
2✔
90
            state_machine,
2✔
91
            mempool,
2✔
92
            max_utxo_chunk_size: MAX_UTXO_CHUNK_SIZE,
2✔
93
        }
2✔
94
    }
2✔
95

96
    fn state_machine(&self) -> StateMachineHandle {
×
97
        self.state_machine.clone()
×
98
    }
×
99

100
    fn db(&self) -> &AsyncBlockchainDb<B> {
3✔
101
        &self.db
3✔
102
    }
3✔
103

104
    fn mempool(&self) -> MempoolHandle {
×
105
        self.mempool.clone()
×
106
    }
×
107

108
    async fn fetch_kernel(&self, signature: types::CompressedSignature) -> Result<TxQueryResponse, Error> {
×
109
        let db = self.db();
×
110

111
        match db.fetch_kernel_by_excess_sig(signature.clone()).await? {
×
112
            None => (),
×
113
            Some((_, block_hash)) => match db.fetch_header_by_block_hash(block_hash).await? {
×
114
                None => (),
×
115
                Some(header) => {
×
116
                    let response = TxQueryResponse {
×
117
                        location: TxLocation::Mined,
×
118
                        mined_header_hash: Some(block_hash.to_vec()),
×
119
                        mined_height: Some(header.height),
×
120
                        mined_timestamp: Some(header.timestamp.as_u64()),
×
121
                    };
×
122
                    return Ok(response);
×
123
                },
124
            },
125
        };
126

127
        // If not in a block then check the mempool
128
        let mut mempool = self.mempool();
×
129
        let mempool_response = match mempool.get_tx_state_by_excess_sig(signature.clone()).await? {
×
130
            TxStorageResponse::UnconfirmedPool => TxQueryResponse {
×
131
                location: TxLocation::InMempool,
×
132
                mined_header_hash: None,
×
133
                mined_height: None,
×
134
                mined_timestamp: None,
×
135
            },
×
136
            TxStorageResponse::ReorgPool |
137
            TxStorageResponse::NotStoredOrphan |
138
            TxStorageResponse::NotStoredTimeLocked |
139
            TxStorageResponse::NotStoredAlreadySpent |
140
            TxStorageResponse::NotStoredConsensus |
141
            TxStorageResponse::NotStored |
142
            TxStorageResponse::NotStoredFeeTooLow |
143
            TxStorageResponse::NotStoredAlreadyMined => TxQueryResponse {
×
144
                location: TxLocation::NotStored,
×
145
                mined_timestamp: None,
×
146
                mined_height: None,
×
147
                mined_header_hash: None,
×
148
            },
×
149
        };
150

151
        Ok(mempool_response)
×
152
    }
×
153

154
    async fn fetch_utxos_by_block(&self, request: GetUtxosByBlockRequest) -> Result<GetUtxosByBlockResponse, Error> {
×
155
        request.validate()?;
×
156

157
        let hash = request.header_hash.clone().try_into()?;
×
158

159
        let header = self
×
160
            .db()
×
161
            .fetch_header_by_block_hash(hash)
×
162
            .await?
×
163
            .ok_or_else(|| Error::HeaderHashNotFound)?;
×
164

165
        // fetch utxos
166
        let outputs_with_statuses = self.db.fetch_outputs_in_block_with_spend_state(hash, None).await?;
×
167

168
        let outputs = outputs_with_statuses
×
169
            .into_iter()
×
170
            .map(|(output, _spent)| output)
×
171
            .collect::<Vec<TransactionOutput>>();
×
172

173
        // if its empty, we need to send an empty vec of outputs.
174
        let utxo_block_response = GetUtxosByBlockResponse {
×
175
            outputs,
×
176
            height: header.height,
×
177
            header_hash: hash.to_vec(),
×
178
            mined_timestamp: header.timestamp.as_u64(),
×
179
        };
×
180

181
        Ok(utxo_block_response)
×
182
    }
×
183

184
    #[allow(clippy::too_many_lines)]
185
    async fn fetch_utxos(&self, request: SyncUtxosByBlockRequest) -> Result<SyncUtxosByBlockResponseV0, Error> {
2✔
186
        // validate and fetch inputs
187
        request.validate()?;
2✔
188

189
        let hash = request.start_header_hash.clone().try_into()?;
2✔
190
        let start_header = self
2✔
191
            .db()
2✔
192
            .fetch_header_by_block_hash(hash)
2✔
193
            .await?
2✔
194
            .ok_or_else(|| Error::StartHeaderHashNotFound)?;
2✔
195

196
        let tip_header = self.db.fetch_tip_header().await?;
1✔
197
        // we only allow wallets to ask for a max of 100 blocks at a time and we want to cache the queries to ensure
198
        // they are in batch of 100 and we want to ensure they request goes to the nearest 100 block height so
199
        // we can cache all wallet's queries
200
        let increase = ((start_header.height + WALLET_MAX_BLOCKS_PER_REQUEST) / WALLET_MAX_BLOCKS_PER_REQUEST) *
1✔
201
            WALLET_MAX_BLOCKS_PER_REQUEST;
1✔
202
        let end_height = cmp::min(tip_header.header().height, increase);
1✔
203
        // pagination
204
        let start_header_height = start_header.height + (request.page * request.limit);
1✔
205
        if start_header_height > tip_header.header().height {
1✔
206
            return Err(Error::HeaderHeightMismatch {
1✔
207
                start_height: start_header.height,
1✔
208
                end_height: tip_header.header().height,
1✔
209
            });
1✔
210
        }
×
211
        let start_header = self
×
212
            .db
×
213
            .fetch_header(start_header_height)
×
214
            .await?
×
215
            .ok_or_else(|| Error::HeaderNotFound {
×
216
                height: start_header_height,
×
217
            })?;
×
218
        // fetch utxos
219
        let mut utxos = vec![];
×
220
        let next_page_start_height = start_header.height.saturating_add(request.limit);
×
221
        let mut current_header = start_header;
×
222
        let mut fetched_chunks = 0;
×
223
        let spending_end_header_hash = self
×
224
            .db
×
225
            .fetch_header(
×
226
                tip_header
×
227
                    .header()
×
228
                    .height
×
229
                    .saturating_sub(SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT),
×
230
            )
×
231
            .await?
×
232
            .ok_or_else(|| Error::HeaderNotFound {
×
233
                height: tip_header
×
234
                    .header()
×
235
                    .height
×
236
                    .saturating_sub(SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT),
×
237
            })?
×
238
            .hash();
×
239
        let next_header_to_request;
240
        let mut has_next_page = true;
×
241
        loop {
242
            let current_header_hash = current_header.hash();
×
243
            trace!(
×
244
                target: LOG_TARGET,
×
245
                "current header = {} ({})",
246
                current_header.height,
247
                current_header_hash.to_hex()
×
248
            );
249
            let outputs = if request.exclude_spent {
×
250
                self.db
×
251
                    .fetch_outputs_in_block_with_spend_state(current_header_hash, Some(spending_end_header_hash))
×
252
                    .await?
×
253
                    .into_iter()
×
254
                    .filter(|(_, spent)| !spent)
×
255
                    .map(|(output, _spent)| output)
×
256
                    .collect::<Vec<TransactionOutput>>()
×
257
            } else {
258
                self.db
×
259
                    .fetch_outputs_in_block_with_spend_state(current_header_hash, None)
×
260
                    .await?
×
261
                    .into_iter()
×
262
                    .map(|(output, _spent)| output)
×
263
                    .collect::<Vec<TransactionOutput>>()
×
264
            };
265
            let mut inputs = if request.exclude_inputs {
×
266
                Vec::new()
×
267
            } else {
268
                self.db
×
269
                    .fetch_inputs_in_block(current_header_hash)
×
270
                    .await?
×
271
                    .into_iter()
×
272
                    .map(|input| input.output_hash())
×
273
                    .collect::<Vec<FixedHash>>()
×
274
            };
275
            if outputs.is_empty() && inputs.is_empty() {
×
276
                // No outputs or inputs in this block, put empty placeholder here so wallet knows this height has been
×
277
                // scanned This can happen if all the outputs are spent and exclude_spent is true
×
278
                let block_response = BlockUtxoInfo {
×
279
                    outputs: Vec::new(),
×
280
                    inputs: Vec::new(),
×
281
                    height: current_header.height,
×
282
                    header_hash: current_header_hash.to_vec(),
×
283
                    mined_timestamp: current_header.timestamp.as_u64(),
×
284
                };
×
285
                utxos.push(block_response);
×
286
            }
×
287
            for output_chunk in outputs.chunks(self.max_utxo_chunk_size) {
×
288
                let inputs_to_send = if inputs.is_empty() {
×
289
                    Vec::new()
×
290
                } else {
291
                    let num_to_drain = inputs.len().min(self.max_utxo_chunk_size);
×
292
                    inputs.drain(..num_to_drain).map(|h| h.to_vec()).collect()
×
293
                };
294

295
                let output_block_response = BlockUtxoInfo {
×
296
                    outputs: output_chunk
×
297
                        .iter()
×
298
                        .map(|output| MinimalUtxoSyncInfo {
×
299
                            output_hash: output.hash().to_vec(),
×
300
                            commitment: output.commitment().to_vec(),
×
301
                            encrypted_data: output.encrypted_data().as_bytes().to_vec(),
×
302
                            sender_offset_public_key: output.sender_offset_public_key.to_vec(),
×
303
                        })
×
304
                        .collect(),
×
305
                    inputs: inputs_to_send,
×
306
                    height: current_header.height,
×
307
                    header_hash: current_header_hash.to_vec(),
×
308
                    mined_timestamp: current_header.timestamp.as_u64(),
×
309
                };
310
                utxos.push(output_block_response);
×
311
                fetched_chunks += 1;
×
312
            }
313
            // We might still have inputs left to send if they are more than the outputs
314
            for input_chunk in inputs.chunks(self.max_utxo_chunk_size) {
×
315
                let output_block_response = BlockUtxoInfo {
×
316
                    outputs: Vec::new(),
×
317
                    inputs: input_chunk.iter().map(|h| h.to_vec()).collect::<Vec<_>>().to_vec(),
×
318
                    height: current_header.height,
×
319
                    header_hash: current_header_hash.to_vec(),
×
320
                    mined_timestamp: current_header.timestamp.as_u64(),
×
321
                };
322
                utxos.push(output_block_response);
×
323
                fetched_chunks += 1;
×
324
            }
325

326
            if current_header.height >= tip_header.header().height {
×
327
                next_header_to_request = vec![];
×
328
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
×
329
                break;
×
330
            }
×
331
            if fetched_chunks > request.limit {
×
332
                next_header_to_request = current_header_hash.to_vec();
×
333
                // This is a special edge case, our request has reached the page limit, but we are also not done with
334
                // the block. We also dont want to split up the block over two requests. So we need to ensure that we
335
                // remove the partial block we added so that it can be requested fully in the next request. We also dont
336
                // want to get in a loop where the block cannot fit into the page limit, so if the block is the same as
337
                // the first one, we just send it as is, partial. If not we remove it and let it be sent in the next
338
                // request.
339
                if utxos.first().ok_or(Error::General(anyhow::anyhow!("No utxos founds")))? // should never happen as we always add at least one block
×
340
                    .header_hash ==
341
                    current_header_hash.to_vec()
×
342
                {
343
                    // special edge case where the first block is also the last block we can send, so we just send it as
344
                    // is, partial
345
                    break;
×
346
                }
×
347
                while !utxos.is_empty() &&
×
348
                    utxos.last().ok_or(Error::General(anyhow::anyhow!("No utxos found")))? // should never happen as we always add at least one block
×
349
                    .header_hash ==
350
                        current_header_hash.to_vec()
×
351
                {
×
352
                    utxos.pop();
×
353
                }
×
354
                has_next_page = false;
×
355
                break;
×
356
            }
×
357
            if current_header.height + 1 > end_height {
×
358
                next_header_to_request = current_header.hash().to_vec();
×
359
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
×
360
                break; // Stop if we reach the end height
×
361
            }
×
362
            current_header =
×
363
                self.db
×
364
                    .fetch_header(current_header.height + 1)
×
365
                    .await?
×
366
                    .ok_or_else(|| Error::HeaderNotFound {
×
367
                        height: current_header.height + 1,
×
368
                    })?;
×
369

370
            if current_header.height == next_page_start_height {
×
371
                // we are on the limit, stop here
372
                next_header_to_request = current_header.hash().to_vec();
×
373
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
×
374
                break;
×
375
            }
×
376
        }
377
        Ok(SyncUtxosByBlockResponseV0 {
×
378
            blocks: utxos,
×
379
            has_next_page,
×
380
            next_header_to_scan: next_header_to_request,
×
381
        })
×
382
    }
2✔
383
}
384

385
#[async_trait::async_trait]
impl<B: BlockchainBackend + 'static> BaseNodeWalletQueryService for Service<B> {
    type Error = Error;

    /// Returns the current chain metadata plus whether this node considers itself
    /// synced. Sync status is only reported while in the `Listening` state; any
    /// other state machine state reports `false`.
    async fn get_tip_info(&self) -> Result<TipInfoResponse, Self::Error> {
        let state_machine = self.state_machine();
        let status_watch = state_machine.get_status_info_watch();
        let is_synced = match status_watch.borrow().state_info {
            StateInfo::Listening(li) => li.is_synced(),
            _ => false,
        };

        let metadata = self.db.get_chain_metadata().await?;

        Ok(TipInfoResponse {
            metadata: Some(metadata),
            is_synced,
        })
    }

    /// Fetches the block header at `height`, or `Error::HeaderNotFound`.
    async fn get_header_by_height(&self, height: u64) -> Result<models::BlockHeader, Self::Error> {
        let result = self
            .db
            .fetch_header(height)
            .await?
            .ok_or(Error::HeaderNotFound { height })?
            .into();
        Ok(result)
    }

    /// Finds the height of the block whose timestamp immediately precedes
    /// `epoch_time`, using a binary search over header timestamps.
    ///
    /// NOTE(review): this assumes header timestamps are non-decreasing with height;
    /// out-of-order timestamps would make the search inexact — confirm upstream
    /// consensus rules before relying on exactness.
    async fn get_height_at_time(&self, epoch_time: u64) -> Result<u64, Self::Error> {
        trace!(target: LOG_TARGET, "requested_epoch_time: {}", epoch_time);
        let tip_header = self.db.fetch_tip_header().await?;

        // Classic binary search bounds over [0, tip height].
        let mut left_height = 0u64;
        let mut right_height = tip_header.height();

        while left_height <= right_height {
            let mut mid_height = (left_height + right_height) / 2;

            // Height 0 (genesis) short-circuits; also guarantees mid_height - 1 below
            // cannot underflow.
            if mid_height == 0 {
                return Ok(0u64);
            }
            // If the two bounds are adjacent then perform the test between the right and left sides
            if left_height == mid_height {
                mid_height = right_height;
            }

            let mid_header = self
                .db
                .fetch_header(mid_height)
                .await?
                .ok_or_else(|| Error::HeaderNotFound { height: mid_height })?;
            let before_mid_header = self
                .db
                .fetch_header(mid_height - 1)
                .await?
                .ok_or_else(|| Error::HeaderNotFound { height: mid_height - 1 })?;
            trace!(
                target: LOG_TARGET,
                "requested_epoch_time: {}, left: {}, mid: {}/{} ({}/{}), right: {}",
                epoch_time,
                left_height,
                mid_height,
                mid_height-1,
                mid_header.timestamp.as_u64(),
                before_mid_header.timestamp.as_u64(),
                right_height
            );
            // Boundary found: the header before mid is at/before the requested time
            // and mid itself is after it.
            if epoch_time < mid_header.timestamp.as_u64() && epoch_time >= before_mid_header.timestamp.as_u64() {
                trace!(
                    target: LOG_TARGET,
                    "requested_epoch_time: {}, selected height: {}",
                    epoch_time, before_mid_header.height
                );
                return Ok(before_mid_header.height);
            } else if mid_height == right_height {
                trace!(
                    target: LOG_TARGET,
                    "requested_epoch_time: {epoch_time}, selected height: {right_height}"
                );
                return Ok(right_height);
            } else if epoch_time <= mid_header.timestamp.as_u64() {
                right_height = mid_height;
            } else {
                left_height = mid_height;
            }
        }

        Ok(0u64)
    }

    /// Resolves where a transaction is located (mined / in mempool / not stored)
    /// by its kernel excess signature.
    async fn transaction_query(
        &self,
        signature: crate::base_node::rpc::models::Signature,
    ) -> Result<TxQueryResponse, Self::Error> {
        let signature = signature.try_into().map_err(Error::SignatureConversion)?;

        let response = self.fetch_kernel(signature).await?;

        Ok(response)
    }

    /// V0 of the paginated UTXO-by-block sync endpoint; see `fetch_utxos`.
    async fn sync_utxos_by_block_v0(
        &self,
        request: SyncUtxosByBlockRequest,
    ) -> Result<SyncUtxosByBlockResponseV0, Self::Error> {
        self.fetch_utxos(request).await
    }

    /// V1 of the paginated UTXO-by-block sync endpoint; same data as V0,
    /// converted into the V1 response type.
    async fn sync_utxos_by_block_v1(
        &self,
        request: SyncUtxosByBlockRequest,
    ) -> Result<SyncUtxosByBlockResponseV1, Self::Error> {
        let v1 = self.fetch_utxos(request).await?;
        Ok(v1.into())
    }

    /// Returns all outputs mined in a single block; see `fetch_utxos_by_block`.
    async fn get_utxos_by_block(
        &self,
        request: GetUtxosByBlockRequest,
    ) -> Result<GetUtxosByBlockResponse, Self::Error> {
        self.fetch_utxos_by_block(request).await
    }

    /// For each requested output hash, returns the block hash/height/timestamp it
    /// was mined in. Hashes with no matching output are silently omitted from the
    /// response.
    async fn get_utxos_mined_info(
        &self,
        request: models::GetUtxosMinedInfoRequest,
    ) -> Result<models::GetUtxosMinedInfoResponse, Self::Error> {
        request.validate()?;

        let mut utxos = vec![];

        let tip_header = self.db().fetch_tip_header().await?;
        for hash in request.hashes {
            let hash = hash.try_into()?;
            let output = self.db().fetch_output(hash).await?;
            if let Some(output) = output {
                utxos.push(models::MinedUtxoInfo {
                    utxo_hash: hash.to_vec(),
                    mined_in_hash: output.header_hash.to_vec(),
                    mined_in_height: output.mined_height,
                    mined_in_timestamp: output.mined_timestamp,
                });
            }
        }

        Ok(models::GetUtxosMinedInfoResponse {
            utxos,
            best_block_hash: tip_header.hash().to_vec(),
            best_block_height: tip_header.height(),
        })
    }

    /// For each requested output hash, reports where it was mined and, if spent,
    /// where it was spent. Unknown hashes are returned with both header fields
    /// `None`. Fails with `Error::HeaderHashNotFound` when the caller's
    /// `must_include_header` is not known to this node.
    async fn get_utxos_deleted_info(
        &self,
        request: models::GetUtxosDeletedInfoRequest,
    ) -> Result<models::GetUtxosDeletedInfoResponse, Self::Error> {
        request.validate()?;

        let mut utxos = vec![];

        // Guard: the caller's reference header must exist locally so the answer is
        // given against a chain both sides agree on.
        let must_include_header = request.must_include_header.clone().try_into()?;
        if self
            .db()
            .fetch_header_by_block_hash(must_include_header)
            .await?
            .is_none()
        {
            return Err(Error::HeaderHashNotFound);
        }

        let tip_header = self.db().fetch_tip_header().await?;
        for hash in request.hashes {
            let hash = hash.try_into()?;
            let output = self.db().fetch_output(hash).await?;

            if let Some(output) = output {
                // is it still unspent?
                let input = self.db().fetch_input(hash).await?;
                if let Some(i) = input {
                    // Output exists and a matching input was found: mined and spent.
                    utxos.push(models::DeletedUtxoInfo {
                        utxo_hash: hash.to_vec(),
                        found_in_header: Some((output.mined_height, output.header_hash.to_vec())),
                        spent_in_header: Some((i.spent_height, i.header_hash.to_vec())),
                    });
                } else {
                    // Output exists but no input spends it: mined, still unspent.
                    utxos.push(models::DeletedUtxoInfo {
                        utxo_hash: hash.to_vec(),
                        found_in_header: Some((output.mined_height, output.header_hash.to_vec())),
                        spent_in_header: None,
                    });
                }
            } else {
                // Unknown output hash: report as not found at all.
                utxos.push(models::DeletedUtxoInfo {
                    utxo_hash: hash.to_vec(),
                    found_in_header: None,
                    spent_in_header: None,
                });
            }
        }

        Ok(models::GetUtxosDeletedInfoResponse {
            utxos,
            best_block_hash: tip_header.hash().to_vec(),
            best_block_height: tip_header.height(),
        })
    }

    /// Generates a merkle proof for the kernel identified by `excess_sig`,
    /// returning the bincode-serialized proof with its block hash and leaf index.
    async fn generate_kernel_merkle_proof(
        &self,
        excess_sig: types::CompressedSignature,
    ) -> Result<GenerateKernelMerkleProofResponse, Self::Error> {
        let proof = self.db().generate_kernel_merkle_proof(excess_sig).await?;
        Ok(GenerateKernelMerkleProofResponse {
            encoded_merkle_proof: bincode::serialize(&proof.merkle_proof).map_err(Error::general)?,
            block_hash: proof.block_hash,
            leaf_index: proof.leaf_index.value() as u64,
        })
    }

    /// Fetches a single output by hash via the spend-status-at-tip lookup;
    /// returns `Error::OutputNotFound` when the lookup yields nothing.
    async fn get_utxo(&self, request: models::GetUtxoRequest) -> Result<Option<TransactionOutput>, Self::Error> {
        let hash: FixedHash = request.output_hash.try_into().map_err(Error::general)?;
        let outputs = self.db().fetch_outputs_with_spend_status_at_tip(vec![hash]).await?;
        let output = match outputs.first() {
            Some(Some((output, _spent))) => Some(output.clone()),
            _ => return Err(Error::OutputNotFound),
        };
        Ok(output)
    }

    /// Returns fee-per-gram statistics from the mempool service for the current
    /// best block height. `count` is capped at 20; larger values are rejected.
    async fn get_mempool_fee_per_gram_stats(&self, count: usize) -> Result<Vec<models::FeePerGramStat>, Self::Error> {
        if count > 20 {
            return Err(Error::general(anyhow::anyhow!(
                "count must be less than or equal to 20"
            )));
        }

        let metadata = self.db.get_chain_metadata().await?;
        let stats = self
            .mempool()
            .get_fee_per_gram_stats(count, metadata.best_block_height())
            .await
            .map_err(Error::general)?;

        Ok(stats)
    }
}
633

634
#[cfg(test)]
635
mod tests {
636
    #![allow(clippy::indexing_slicing)]
637
    use tari_common::configuration::Network;
638
    use tari_shutdown::Shutdown;
639

640
    use super::*;
641
    use crate::test_helpers::blockchain::create_new_blockchain_with_network;
642
    fn make_state_machine_handle() -> StateMachineHandle {
2✔
643
        use tokio::sync::{broadcast, watch};
644
        let (state_tx, _state_rx) = broadcast::channel(10);
2✔
645
        let (_status_tx, status_rx) =
2✔
646
            watch::channel(crate::base_node::state_machine_service::states::StatusInfo::new());
2✔
647
        let shutdown = Shutdown::new();
2✔
648
        StateMachineHandle::new(state_tx, status_rx, shutdown.to_signal())
2✔
649
    }
2✔
650

651
    fn make_mempool_handle() -> MempoolHandle {
2✔
652
        use crate::mempool::test_utils::mock::create_mempool_service_mock;
653
        let (handle, _state) = create_mempool_service_mock();
2✔
654
        handle
2✔
655
    }
2✔
656

657
    async fn make_service() -> Service<crate::test_helpers::blockchain::TempDatabase> {
2✔
658
        let db = create_new_blockchain_with_network(Network::LocalNet);
2✔
659
        let adb = AsyncBlockchainDb::from(db);
2✔
660
        let state_machine = make_state_machine_handle();
2✔
661
        let mempool = make_mempool_handle();
2✔
662
        Service::new(adb, state_machine, mempool)
2✔
663
    }
2✔
664

665
    #[tokio::test]
666
    async fn fetch_utxos_start_header_not_found() {
1✔
667
        let service = make_service().await;
1✔
668
        let req = SyncUtxosByBlockRequest {
1✔
669
            start_header_hash: vec![0xAB; 32],
1✔
670
            limit: 4,
1✔
671
            page: 0,
1✔
672
            exclude_spent: false,
1✔
673
            exclude_inputs: false,
1✔
674
            version: 0,
1✔
675
        };
1✔
676
        let err = service.fetch_utxos(req).await.unwrap_err();
1✔
677
        match err {
1✔
678
            Error::StartHeaderHashNotFound => {},
1✔
679
            other => panic!("unexpected error: {other:?}"),
1✔
680
        }
1✔
681
    }
1✔
682

683
    #[tokio::test]
684
    async fn fetch_utxos_header_height_mismatch() {
1✔
685
        let service = make_service().await;
1✔
686
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
1✔
687
        // page * limit moves start height beyond tip (0)
688
        let req = SyncUtxosByBlockRequest {
1✔
689
            start_header_hash: genesis.hash().to_vec(),
1✔
690
            limit: 1,
1✔
691
            page: 1,
1✔
692
            exclude_spent: false,
1✔
693
            exclude_inputs: false,
1✔
694
            version: 0,
1✔
695
        };
1✔
696
        let err = service.fetch_utxos(req).await.unwrap_err();
1✔
697
        match err {
1✔
698
            Error::HeaderHeightMismatch { .. } => {},
1✔
699
            other => panic!("unexpected error: {other:?}"),
1✔
700
        }
1✔
701
    }
1✔
702

703
    #[tokio::test]
704
    async fn fetch_utxos_paginates_results() {
1✔
705
        use crate::test_helpers::blockchain::create_main_chain;
706

707
        // Build a small chain: GB -> A -> B -> C
708
        let db = create_new_blockchain_with_network(Network::LocalNet);
1✔
709
        let (_names, chain) = create_main_chain(&db, block_specs!(["A->GB"], ["B->A"], ["C->B"], ["D->C"]));
1✔
710

711
        // Construct the service over this DB
712
        let adb = AsyncBlockchainDb::from(db);
1✔
713
        let state_machine = make_state_machine_handle();
1✔
714
        let mempool = make_mempool_handle();
1✔
715
        let service = Service::new(adb, state_machine, mempool);
1✔
716

717
        // Use genesis as the start header hash
718
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
1✔
719
        let g_hash = genesis.hash().to_vec();
×
720

721
        // Page 0, limit 1: should return only A (height 1) and next header should be B
722
        // genesis block in local net has 0 inputs and outputs
723
        let resp0 = service
×
724
            .fetch_utxos(SyncUtxosByBlockRequest {
×
725
                start_header_hash: g_hash.clone(),
×
726
                limit: 1,
×
727
                page: 0,
×
728
                exclude_spent: false,
×
729
                exclude_inputs: false,
×
730
                version: 0,
×
731
            })
×
732
            .await
×
733
            .expect("fetch_utxos page 0 should succeed");
×
734

735
        assert_eq!(resp0.blocks.len(), 1, "expected exactly one block in first page");
×
736
        assert_eq!(resp0.blocks[0].height, 0, "first page should start at genesis height");
×
737

738
        // Expect next_header_to_scan to be the hash of block A (height 1)
739
        let a_hash = chain.get("A").unwrap().hash().to_vec();
×
740
        assert_eq!(resp0.next_header_to_scan, a_hash, "next header should point to A");
×
741

742
        // Page 1, limit 1: should return only block A (height 1) and next header should be B
743
        let resp1 = service
×
744
            .fetch_utxos(SyncUtxosByBlockRequest {
×
745
                start_header_hash: g_hash.clone(),
×
746
                limit: 1,
×
747
                page: 1,
×
748
                exclude_spent: false,
×
749
                exclude_inputs: false,
×
750
                version: 0,
×
751
            })
×
752
            .await
×
753
            .expect("fetch_utxos page 1 should succeed");
×
754

755
        assert_eq!(resp1.blocks.len(), 1, "expected exactly one block in second page");
×
756
        assert_eq!(resp1.blocks[0].height, 1, "second page should start at height 1 (A)");
×
757

758
        // Expect next_header_to_scan to be the hash of block B (height 2)
759
        let b_hash = chain.get("B").unwrap().hash().to_vec();
×
760
        assert_eq!(resp1.next_header_to_scan, b_hash, "next header should point to B");
×
761

762
        // Page 2, limit 1: should return only block B (height 2) and next header should be C
763
        let resp2 = service
×
764
            .fetch_utxos(SyncUtxosByBlockRequest {
×
765
                start_header_hash: g_hash.clone(),
×
766
                limit: 1,
×
767
                page: 2,
×
768
                exclude_spent: false,
×
769
                exclude_inputs: false,
×
770
                version: 0,
×
771
            })
×
772
            .await
×
773
            .expect("fetch_utxos page 2 should succeed");
×
774

775
        assert_eq!(resp2.blocks.len(), 1, "expected exactly one block in third page");
×
776
        assert_eq!(resp2.blocks[0].height, 2, "third page should start at height 2 (B)");
×
777
        let c_hash = chain.get("C").unwrap().hash().to_vec();
×
778
        assert_eq!(resp2.next_header_to_scan, c_hash, "next header should point to C");
×
779

780
        // Page 3, limit 1: should return only block C (height 3) and next header should be empty (tip reached)
781
        let resp3 = service
×
782
            .fetch_utxos(SyncUtxosByBlockRequest {
×
783
                start_header_hash: g_hash.clone(),
×
784
                limit: 1,
×
785
                page: 3,
×
786
                exclude_spent: false,
×
787
                exclude_inputs: false,
×
788
                version: 0,
×
789
            })
×
790
            .await
×
791
            .expect("fetch_utxos page 3 should succeed");
×
792

793
        assert_eq!(resp3.blocks.len(), 1, "expected exactly one block in fourth page");
×
794
        assert_eq!(resp3.blocks[0].height, 3, "fourth page should start at height 3 (C)");
×
795
        let d_hash = chain.get("D").unwrap().hash().to_vec();
×
796
        assert_eq!(resp3.next_header_to_scan, d_hash, "next header should point to D");
×
797

798
        // Page 4, limit 1: should return only block C (height 3) and next header should be empty (tip reached)
799
        let resp4 = service
×
800
            .fetch_utxos(SyncUtxosByBlockRequest {
×
801
                start_header_hash: g_hash.clone(),
×
802
                limit: 1,
×
803
                page: 4,
×
804
                exclude_spent: false,
×
805
                exclude_inputs: false,
×
806
                version: 0,
×
807
            })
×
808
            .await
×
809
            .expect("fetch_utxos page 3 should succeed");
×
810

811
        assert_eq!(resp4.blocks.len(), 1, "expected exactly one block in fourth page");
×
812
        assert_eq!(resp4.blocks[0].height, 4, "fourth page should start at height 4 (D)");
×
813
        assert!(resp4.next_header_to_scan.is_empty(), "no next header at tip");
×
814
        let resp5 = service
×
815
            .fetch_utxos(SyncUtxosByBlockRequest {
×
816
                start_header_hash: g_hash,
×
817
                limit: 2,
×
818
                page: 0,
×
819
                exclude_spent: false,
×
820
                exclude_inputs: false,
×
821
                version: 0,
×
822
            })
×
823
            .await
×
824
            .expect("fetch_utxos should succeed");
×
825

826
        assert_eq!(resp5.blocks.len(), 2, "expected 2 blocks");
×
827
        assert_eq!(resp5.blocks[0].height, 0, "Should be block (GB)");
×
828
        assert_eq!(resp5.blocks[1].height, 1, "Should be block (A)");
×
829
        let b_hash = chain.get("B").unwrap().hash().to_vec();
×
830
        assert_eq!(resp5.next_header_to_scan, b_hash, "next header should point to B");
×
831
        assert!(resp5.has_next_page, "Should have more pages");
×
832
    }
1✔
833

834
    #[tokio::test]
835
    async fn large_fetch_utxo_paginates_results() {
1✔
836
        use crate::test_helpers::blockchain::create_main_chain;
837

838
        // Build a small chain: GB -> A -> B -> C
839
        let db = create_new_blockchain_with_network(Network::LocalNet);
1✔
840
        let (_names, chain) = create_main_chain(
1✔
841
            &db,
1✔
842
            block_specs!(
1✔
843
                ["1->GB"],
1✔
844
                ["2->1"],
1✔
845
                ["3->2"],
1✔
846
                ["4->3"],
1✔
847
                ["5->4"],
1✔
848
                ["6->5"],
1✔
849
                ["7->6"],
1✔
850
                ["8->7"],
1✔
851
                ["9->8"],
1✔
852
                ["10->9"],
1✔
853
                ["11->10"],
1✔
854
                ["12->11"]
1✔
855
            ),
1✔
856
        );
1✔
857

858
        // Construct the service over this DB
859
        let adb = AsyncBlockchainDb::from(db);
1✔
860
        let state_machine = make_state_machine_handle();
1✔
861
        let mempool = make_mempool_handle();
1✔
862
        let service = Service::new(adb, state_machine, mempool);
1✔
863

864
        // Use genesis as the start header hash
865
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
1✔
866
        let g_hash = genesis.hash().to_vec();
×
867

868
        let resp = service
×
869
            .fetch_utxos(SyncUtxosByBlockRequest {
×
870
                start_header_hash: g_hash.clone(),
×
871
                limit: 10,
×
872
                page: 0,
×
873
                exclude_spent: false,
×
874
                exclude_inputs: false,
×
875
                version: 0,
×
876
            })
×
877
            .await
×
878
            .expect("fetch_utxos should succeed");
×
879

880
        assert_eq!(resp.blocks.len(), 10, "expected 10 blocks");
×
881
        assert_eq!(resp.blocks[0].height, 0, "Should be block (0)");
×
882
        assert_eq!(resp.blocks[1].height, 1, "Should be block (1)");
×
883
        assert_eq!(resp.blocks[2].height, 2, "Should be block (2)");
×
884
        assert_eq!(resp.blocks[3].height, 3, "Should be block (3)");
×
885
        assert_eq!(resp.blocks[4].height, 4, "Should be block (4)");
×
886
        assert_eq!(resp.blocks[5].height, 5, "Should be block (5)");
×
887
        assert_eq!(resp.blocks[6].height, 6, "Should be block (6)");
×
888
        assert_eq!(resp.blocks[7].height, 7, "Should be block (7)");
×
889
        assert_eq!(resp.blocks[8].height, 8, "Should be block (8)");
×
890
        assert_eq!(resp.blocks[9].height, 9, "Should be block (9)");
×
891
        let next_hash = chain.get("10").unwrap().hash().to_vec();
×
892
        assert_eq!(resp.next_header_to_scan, next_hash, "next header should point to 10");
×
893
        assert!(resp.has_next_page, "Should have more pages");
×
894
        let resp = service
×
895
            .fetch_utxos(SyncUtxosByBlockRequest {
×
896
                start_header_hash: g_hash,
×
897
                limit: 10,
×
898
                page: 1,
×
899
                exclude_spent: false,
×
900
                exclude_inputs: false,
×
901
                version: 0,
×
902
            })
×
903
            .await
×
904
            .expect("fetch_utxos should succeed");
×
905
        assert_eq!(resp.blocks.len(), 3, "expected 3 blocks");
×
906
        assert_eq!(resp.blocks[0].height, 10, "Should be block (10)");
×
907
        assert_eq!(resp.blocks[1].height, 11, "Should be block (11)");
×
908
        assert_eq!(resp.blocks[2].height, 12, "Should be block (12)");
×
909
        assert!(resp.next_header_to_scan.is_empty(), "Should be empty");
×
910
        assert!(!resp.has_next_page, "Should not have more pages");
×
911
    }
1✔
912

913
    // This test depends on the Esmeralda genesis block (whose UTXO set is large
    // enough to be split across chunks), so it only runs and works in esmeralda.
    #[cfg(tari_target_network_testnet)]
    #[tokio::test]
    async fn large_utxo_handled_correctly() {
        use crate::test_helpers::blockchain::create_main_chain;

        // Build a 10-block chain on top of the Esmeralda genesis: GB -> 1 -> ... -> 10
        let db = create_new_blockchain_with_network(Network::Esmeralda);
        let (_names, _chain) = create_main_chain(
            &db,
            block_specs!(
                ["1->GB"],
                ["2->1"],
                ["3->2"],
                ["4->3"],
                ["5->4"],
                ["6->5"],
                ["7->6"],
                ["8->7"],
                ["9->8"],
                ["10->9"],
            ),
        );

        // Construct the service over this DB
        let adb = AsyncBlockchainDb::from(db);
        let state_machine = make_state_machine_handle();
        let mempool = make_mempool_handle();
        let mut service = Service::new(adb, state_machine, mempool);
        // Force a tiny chunk size so the genesis block's outputs must be split
        // across multiple chunks within a single page.
        service.max_utxo_chunk_size = 500;

        // Use genesis as the start header hash
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
        let g_hash = genesis.hash().to_vec();

        let resp = service
            .fetch_utxos(SyncUtxosByBlockRequest {
                start_header_hash: g_hash.clone(),
                limit: 5,
                page: 0,
                exclude_spent: false,
                exclude_inputs: false,
                version: 0,
            })
            .await
            .expect("fetch_utxos should succeed");

        // The oversized genesis block is split into two chunks, both reported
        // at height 0, so the 5 returned entries cover heights 0, 0, 1, 2, 3.
        assert_eq!(resp.blocks.len(), 5, "expected 5 blocks");
        assert_eq!(resp.blocks[0].height, 0, "Should be block (0)");
        assert_eq!(resp.blocks[1].height, 0, "Should be block (0)");
        assert_eq!(resp.blocks[2].height, 1, "Should be block (1)");
        assert_eq!(resp.blocks[3].height, 2, "Should be block (2)");
        assert_eq!(resp.blocks[4].height, 3, "Should be block (3)");
        let header_4 = service.db().fetch_header(4).await.unwrap().unwrap();
        assert_eq!(
            header_4.hash().to_vec(),
            resp.next_header_to_scan,
            "next header should point to 4"
        );
        assert!(!resp.has_next_page, "Should have no more pages");
    }
974
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc