• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tari-project / tari / 23151768217

16 Mar 2026 03:31PM UTC coverage: 61.722% (+0.03%) from 61.696%
23151768217

push

github

web-flow
fix: import-paper-wallet when base-dir is absolute (#7720)

When wallet paths are already absolute (set during initialization via
`set_base_path`), creating a temp wallet for `import-paper-wallet`
silently reused the original wallet's database instead of an isolated
temp one.

## Root Cause

`WalletConfig::set_base_path` is a no-op for already-absolute paths:

```rust
pub fn set_base_path<P: AsRef<Path>>(&mut self, base_path: P) {
    if !self.data_dir.is_absolute() {  // skipped if already absolute
        self.data_dir = base_path.as_ref().join(self.data_dir.as_path());
    }
    // ...
}
```

So `new_config.set_base_path(temp_path.clone())` did nothing when
`base-dir` was absolute, causing the temp wallet to open the same DB as
the running wallet.

## Fix

Replace the `set_base_path` call with direct path assignments in the
`ImportPaperWallet` handler:

```rust
let mut new_config = config.clone();
// set_base_path is a no-op for already-absolute paths; assign directly
new_config.data_dir = temp_path.clone();
new_config.config_dir = temp_path.join("config");
new_config.db_file = temp_path.join("console_wallet.db");
```

<!-- START COPILOT ORIGINAL PROMPT -->



<details>

<summary>Original prompt</summary>

> 
> ----
> 
> *This section details the original issue you should resolve*
> 
> <issue_title>import-wallet only works when base-dir is
relative.</issue_title>
> <issue_description>**Describe the bug**
> You can use the `import-paper-wallet` command to sweep funds from a
wallet into your current wallet.
> It does this by creating a temporary wallet using (almost) the same
Config as your original wallet.
> 
> I say _almost_, because the `data_dir` and `db_file` entries get
"absolutised" somewhere during wallet initialisation.
> 
> But the temp wallet creation process assumes that paths are relative,
based on
> 
> `new_config.set_base_path(temp_path.clone());`
> 
> and 
> 
> ```rust
>     pub fn set_base_path<P: AsRef<Path>>(&mut self, base_path... (continued)

70610 of 114400 relevant lines covered (61.72%)

227141.15 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

60.66
/base_layer/core/src/base_node/rpc/query_service.rs
1
// Copyright 2025 The Tari Project
2
// SPDX-License-Identifier: BSD-3-Clause
3

4
use std::cmp;
5

6
use log::trace;
7
use serde_valid::{Validate, validation};
8
use tari_common_types::{
9
    types,
10
    types::{FixedHash, FixedHashSizeError},
11
};
12
use tari_transaction_components::{
13
    rpc::{
14
        models,
15
        models::{
16
            BlockUtxoInfo,
17
            GenerateKernelMerkleProofResponse,
18
            GetUtxosByBlockRequest,
19
            GetUtxosByBlockResponse,
20
            MinimalUtxoSyncInfo,
21
            SyncUtxosByBlockRequest,
22
            SyncUtxosByBlockResponseV0,
23
            SyncUtxosByBlockResponseV1,
24
            TipInfoResponse,
25
            TxLocation,
26
            TxQueryResponse,
27
        },
28
    },
29
    transaction_components::TransactionOutput,
30
};
31
use tari_utilities::{ByteArray, ByteArrayError, hex::Hex};
32
use thiserror::Error;
33

34
use crate::{
35
    base_node::{StateMachineHandle, rpc::BaseNodeWalletQueryService, state_machine_service::states::StateInfo},
36
    chain_storage::{BlockchainBackend, ChainStorageError, async_db::AsyncBlockchainDb},
37
    mempool::{MempoolServiceError, TxStorageResponse, service::MempoolHandle},
38
};
39

40
const LOG_TARGET: &str = "c::bn::rpc::query_service";
// Spent-state lookups in `fetch_utxos` are anchored this many blocks below the
// tip (see `spending_end_header_hash`); presumably a reorg-safety margin — confirm.
const SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT: u64 = 1000;
// Wallet sync requests are capped/aligned to this many blocks so responses can
// be cached per 100-block boundary (see the comment in `fetch_utxos`).
const WALLET_MAX_BLOCKS_PER_REQUEST: u64 = 100;
// Maximum number of outputs (or inputs) packed into a single `BlockUtxoInfo` chunk.
const MAX_UTXO_CHUNK_SIZE: usize = 2000;
44

45
/// Errors produced by the base node wallet query RPC service.
#[derive(Debug, Error)]
pub enum Error {
    #[error("Failed to get chain metadata: {0}")]
    FailedToGetChainMetadata(#[from] ChainStorageError),
    #[error("Header not found at height: {height}")]
    HeaderNotFound { height: u64 },
    #[error("Signature conversion error: {0}")]
    SignatureConversion(ByteArrayError),
    #[error("Mempool service error: {0}")]
    MempoolService(#[from] MempoolServiceError),
    #[error("Serde validation error: {0}")]
    SerdeValidation(#[from] validation::Errors),
    #[error("Hash conversion error: {0}")]
    HashConversion(#[from] FixedHashSizeError),
    #[error("Start header hash not found")]
    StartHeaderHashNotFound,
    #[error("End header hash not found")]
    EndHeaderHashNotFound,
    #[error("Header hash not found")]
    HeaderHashNotFound,
    #[error("Start header height {start_height} cannot be greater than the end header height {end_height}")]
    HeaderHeightMismatch { start_height: u64, end_height: u64 },
    #[error("Output not found")]
    OutputNotFound,
    /// Catch-all for errors with no dedicated variant; construct via `Error::general`.
    #[error("A general error occurred: {0}")]
    General(anyhow::Error),
}
72

73
impl Error {
74
    fn general(err: impl Into<anyhow::Error>) -> Self {
×
75
        Error::General(err.into())
×
76
    }
×
77
}
78

79
/// RPC query service that answers wallet requests against a base node.
pub struct Service<B> {
    /// Async handle to the blockchain database backend.
    db: AsyncBlockchainDb<B>,
    /// Handle to the node state machine (used to report sync status).
    state_machine: StateMachineHandle,
    /// Handle to the mempool service for unconfirmed transaction queries.
    mempool: MempoolHandle,
    /// Max outputs/inputs per response chunk; `MAX_UTXO_CHUNK_SIZE` in production.
    max_utxo_chunk_size: usize,
}
85

86
impl<B: BlockchainBackend + 'static> Service<B> {
87
    pub fn new(db: AsyncBlockchainDb<B>, state_machine: StateMachineHandle, mempool: MempoolHandle) -> Self {
5✔
88
        Self {
5✔
89
            db,
5✔
90
            state_machine,
5✔
91
            mempool,
5✔
92
            max_utxo_chunk_size: MAX_UTXO_CHUNK_SIZE,
5✔
93
        }
5✔
94
    }
5✔
95

96
    fn state_machine(&self) -> StateMachineHandle {
×
97
        self.state_machine.clone()
×
98
    }
×
99

100
    fn db(&self) -> &AsyncBlockchainDb<B> {
16✔
101
        &self.db
16✔
102
    }
16✔
103

104
    fn mempool(&self) -> MempoolHandle {
×
105
        self.mempool.clone()
×
106
    }
×
107

108
    async fn fetch_kernel(&self, signature: types::CompressedSignature) -> Result<TxQueryResponse, Error> {
×
109
        let db = self.db();
×
110

111
        match db.fetch_kernel_by_excess_sig(signature.clone()).await? {
×
112
            None => (),
×
113
            Some((_, block_hash)) => match db.fetch_header_by_block_hash(block_hash).await? {
×
114
                None => (),
×
115
                Some(header) => {
×
116
                    let response = TxQueryResponse {
×
117
                        location: TxLocation::Mined,
×
118
                        mined_header_hash: Some(block_hash.to_vec()),
×
119
                        mined_height: Some(header.height),
×
120
                        mined_timestamp: Some(header.timestamp.as_u64()),
×
121
                    };
×
122
                    return Ok(response);
×
123
                },
124
            },
125
        };
126

127
        // If not in a block then check the mempool
128
        let mut mempool = self.mempool();
×
129
        let mempool_response = match mempool.get_tx_state_by_excess_sig(signature.clone()).await? {
×
130
            TxStorageResponse::UnconfirmedPool => TxQueryResponse {
×
131
                location: TxLocation::InMempool,
×
132
                mined_header_hash: None,
×
133
                mined_height: None,
×
134
                mined_timestamp: None,
×
135
            },
×
136
            TxStorageResponse::ReorgPool |
137
            TxStorageResponse::NotStoredOrphan |
138
            TxStorageResponse::NotStoredTimeLocked |
139
            TxStorageResponse::NotStoredAlreadySpent |
140
            TxStorageResponse::NotStoredConsensus |
141
            TxStorageResponse::NotStored |
142
            TxStorageResponse::NotStoredFeeTooLow |
143
            TxStorageResponse::NotStoredAlreadyMined => TxQueryResponse {
×
144
                location: TxLocation::NotStored,
×
145
                mined_timestamp: None,
×
146
                mined_height: None,
×
147
                mined_header_hash: None,
×
148
            },
×
149
        };
150

151
        Ok(mempool_response)
×
152
    }
×
153

154
    async fn fetch_utxos_by_block(&self, request: GetUtxosByBlockRequest) -> Result<GetUtxosByBlockResponse, Error> {
×
155
        request.validate()?;
×
156

157
        let hash = request.header_hash.clone().try_into()?;
×
158

159
        let header = self
×
160
            .db()
×
161
            .fetch_header_by_block_hash(hash)
×
162
            .await?
×
163
            .ok_or_else(|| Error::HeaderHashNotFound)?;
×
164

165
        // fetch utxos
166
        let outputs_with_statuses = self.db.fetch_outputs_in_block_with_spend_state(hash, None).await?;
×
167

168
        let outputs = outputs_with_statuses
×
169
            .into_iter()
×
170
            .map(|(output, _spent)| output)
×
171
            .collect::<Vec<TransactionOutput>>();
×
172

173
        // if its empty, we need to send an empty vec of outputs.
174
        let utxo_block_response = GetUtxosByBlockResponse {
×
175
            outputs,
×
176
            height: header.height,
×
177
            header_hash: hash.to_vec(),
×
178
            mined_timestamp: header.timestamp.as_u64(),
×
179
        };
×
180

181
        Ok(utxo_block_response)
×
182
    }
×
183

184
    #[allow(clippy::too_many_lines)]
185
    async fn fetch_utxos(&self, request: SyncUtxosByBlockRequest) -> Result<SyncUtxosByBlockResponseV0, Error> {
11✔
186
        // validate and fetch inputs
187
        request.validate()?;
11✔
188

189
        let hash = request.start_header_hash.clone().try_into()?;
11✔
190
        let start_header = self
11✔
191
            .db()
11✔
192
            .fetch_header_by_block_hash(hash)
11✔
193
            .await?
11✔
194
            .ok_or_else(|| Error::StartHeaderHashNotFound)?;
11✔
195

196
        let tip_header = self.db.fetch_tip_header().await?;
10✔
197
        // we only allow wallets to ask for a max of 100 blocks at a time and we want to cache the queries to ensure
198
        // they are in batch of 100 and we want to ensure they request goes to the nearest 100 block height so
199
        // we can cache all wallet's queries
200
        let increase = ((start_header.height + WALLET_MAX_BLOCKS_PER_REQUEST) / WALLET_MAX_BLOCKS_PER_REQUEST) *
10✔
201
            WALLET_MAX_BLOCKS_PER_REQUEST;
10✔
202
        let end_height = cmp::min(tip_header.header().height, increase);
10✔
203
        // pagination
204
        let start_header_height = start_header.height + (request.page * request.limit);
10✔
205
        if start_header_height > tip_header.header().height {
10✔
206
            return Err(Error::HeaderHeightMismatch {
1✔
207
                start_height: start_header.height,
1✔
208
                end_height: tip_header.header().height,
1✔
209
            });
1✔
210
        }
9✔
211
        let start_header = self
9✔
212
            .db
9✔
213
            .fetch_header(start_header_height)
9✔
214
            .await?
9✔
215
            .ok_or_else(|| Error::HeaderNotFound {
9✔
216
                height: start_header_height,
×
217
            })?;
×
218
        // fetch utxos
219
        let mut utxos = vec![];
9✔
220
        let next_page_start_height = start_header.height.saturating_add(request.limit);
9✔
221
        let mut current_header = start_header;
9✔
222
        let mut fetched_chunks = 0;
9✔
223
        let spending_end_header_hash = self
9✔
224
            .db
9✔
225
            .fetch_header(
9✔
226
                tip_header
9✔
227
                    .header()
9✔
228
                    .height
9✔
229
                    .saturating_sub(SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT),
9✔
230
            )
9✔
231
            .await?
9✔
232
            .ok_or_else(|| Error::HeaderNotFound {
9✔
233
                height: tip_header
×
234
                    .header()
×
235
                    .height
×
236
                    .saturating_sub(SYNC_UTXOS_SPEND_TIP_SAFETY_LIMIT),
×
237
            })?
×
238
            .hash();
9✔
239
        let next_header_to_request;
240
        let mut has_next_page = true;
9✔
241
        loop {
242
            let current_header_hash = current_header.hash();
25✔
243
            trace!(
25✔
244
                target: LOG_TARGET,
×
245
                "current header = {} ({})",
246
                current_header.height,
247
                current_header_hash.to_hex()
×
248
            );
249
            let outputs = if request.exclude_spent {
25✔
250
                self.db
×
251
                    .fetch_outputs_in_block_with_spend_state(current_header_hash, Some(spending_end_header_hash))
×
252
                    .await?
×
253
                    .into_iter()
×
254
                    .filter(|(_, spent)| !spent)
×
255
                    .map(|(output, _spent)| output)
×
256
                    .collect::<Vec<TransactionOutput>>()
×
257
            } else {
258
                self.db
25✔
259
                    .fetch_outputs_in_block_with_spend_state(current_header_hash, None)
25✔
260
                    .await?
25✔
261
                    .into_iter()
25✔
262
                    .map(|(output, _spent)| output)
25✔
263
                    .collect::<Vec<TransactionOutput>>()
25✔
264
            };
265
            let mut inputs = if request.exclude_inputs {
25✔
266
                Vec::new()
×
267
            } else {
268
                self.db
25✔
269
                    .fetch_inputs_in_block(current_header_hash)
25✔
270
                    .await?
25✔
271
                    .into_iter()
25✔
272
                    .map(|input| input.output_hash())
313✔
273
                    .collect::<Vec<FixedHash>>()
25✔
274
            };
275
            if outputs.is_empty() && inputs.is_empty() {
25✔
276
                // No outputs or inputs in this block, put empty placeholder here so wallet knows this height has been
3✔
277
                // scanned This can happen if all the outputs are spent and exclude_spent is true
3✔
278
                let block_response = BlockUtxoInfo {
3✔
279
                    outputs: Vec::new(),
3✔
280
                    inputs: Vec::new(),
3✔
281
                    height: current_header.height,
3✔
282
                    header_hash: current_header_hash.to_vec(),
3✔
283
                    mined_timestamp: current_header.timestamp.as_u64(),
3✔
284
                };
3✔
285
                utxos.push(block_response);
3✔
286
            }
22✔
287
            for output_chunk in outputs.chunks(self.max_utxo_chunk_size) {
25✔
288
                let inputs_to_send = if inputs.is_empty() {
23✔
289
                    Vec::new()
22✔
290
                } else {
291
                    let num_to_drain = inputs.len().min(self.max_utxo_chunk_size);
1✔
292
                    inputs.drain(..num_to_drain).map(|h| h.to_vec()).collect()
313✔
293
                };
294

295
                let output_block_response = BlockUtxoInfo {
23✔
296
                    outputs: output_chunk
23✔
297
                        .iter()
23✔
298
                        .map(|output| MinimalUtxoSyncInfo {
23✔
299
                            output_hash: output.hash().to_vec(),
815✔
300
                            commitment: output.commitment().to_vec(),
815✔
301
                            encrypted_data: output.encrypted_data().as_bytes().to_vec(),
815✔
302
                            sender_offset_public_key: output.sender_offset_public_key.to_vec(),
815✔
303
                        })
815✔
304
                        .collect(),
23✔
305
                    inputs: inputs_to_send,
23✔
306
                    height: current_header.height,
23✔
307
                    header_hash: current_header_hash.to_vec(),
23✔
308
                    mined_timestamp: current_header.timestamp.as_u64(),
23✔
309
                };
310
                utxos.push(output_block_response);
23✔
311
                fetched_chunks += 1;
23✔
312
            }
313
            // We might still have inputs left to send if they are more than the outputs
314
            for input_chunk in inputs.chunks(self.max_utxo_chunk_size) {
25✔
315
                let output_block_response = BlockUtxoInfo {
×
316
                    outputs: Vec::new(),
×
317
                    inputs: input_chunk.iter().map(|h| h.to_vec()).collect::<Vec<_>>().to_vec(),
×
318
                    height: current_header.height,
×
319
                    header_hash: current_header_hash.to_vec(),
×
320
                    mined_timestamp: current_header.timestamp.as_u64(),
×
321
                };
322
                utxos.push(output_block_response);
×
323
                fetched_chunks += 1;
×
324
            }
325

326
            if current_header.height >= tip_header.header().height {
25✔
327
                next_header_to_request = vec![];
2✔
328
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
2✔
329
                break;
2✔
330
            }
23✔
331
            if fetched_chunks > request.limit {
23✔
332
                next_header_to_request = current_header_hash.to_vec();
1✔
333
                // This is a special edge case, our request has reached the page limit, but we are also not done with
334
                // the block. We also dont want to split up the block over two requests. So we need to ensure that we
335
                // remove the partial block we added so that it can be requested fully in the next request. We also dont
336
                // want to get in a loop where the block cannot fit into the page limit, so if the block is the same as
337
                // the first one, we just send it as is, partial. If not we remove it and let it be sent in the next
338
                // request.
339
                if utxos.first().ok_or(Error::General(anyhow::anyhow!("No utxos founds")))? // should never happen as we always add at least one block
1✔
340
                    .header_hash ==
341
                    current_header_hash.to_vec()
1✔
342
                {
343
                    // special edge case where the first block is also the last block we can send, so we just send it as
344
                    // is, partial
345
                    break;
×
346
                }
1✔
347
                while !utxos.is_empty() &&
2✔
348
                    utxos.last().ok_or(Error::General(anyhow::anyhow!("No utxos found")))? // should never happen as we always add at least one block
2✔
349
                    .header_hash ==
350
                        current_header_hash.to_vec()
2✔
351
                {
1✔
352
                    utxos.pop();
1✔
353
                }
1✔
354
                has_next_page = false;
1✔
355
                break;
1✔
356
            }
22✔
357
            if current_header.height + 1 > end_height {
22✔
358
                next_header_to_request = current_header.hash().to_vec();
×
359
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
×
360
                break; // Stop if we reach the end height
×
361
            }
22✔
362
            current_header =
22✔
363
                self.db
22✔
364
                    .fetch_header(current_header.height + 1)
22✔
365
                    .await?
22✔
366
                    .ok_or_else(|| Error::HeaderNotFound {
22✔
367
                        height: current_header.height + 1,
×
368
                    })?;
×
369

370
            if current_header.height == next_page_start_height {
22✔
371
                // we are on the limit, stop here
372
                next_header_to_request = current_header.hash().to_vec();
6✔
373
                has_next_page = (end_height.saturating_sub(current_header.height)) > 0;
6✔
374
                break;
6✔
375
            }
16✔
376
        }
377
        Ok(SyncUtxosByBlockResponseV0 {
9✔
378
            blocks: utxos,
9✔
379
            has_next_page,
9✔
380
            next_header_to_scan: next_header_to_request,
9✔
381
        })
9✔
382
    }
11✔
383
}
384

385
#[async_trait::async_trait]
impl<B: BlockchainBackend + 'static> BaseNodeWalletQueryService for Service<B> {
    type Error = Error;

    /// Return chain metadata plus whether this node considers itself synced.
    /// `is_synced` is only true while the state machine is in `Listening` state.
    async fn get_tip_info(&self) -> Result<TipInfoResponse, Self::Error> {
        let state_machine = self.state_machine();
        let status_watch = state_machine.get_status_info_watch();
        let is_synced = match status_watch.borrow().state_info {
            StateInfo::Listening(li) => li.is_synced(),
            _ => false,
        };

        let metadata = self.db.get_chain_metadata().await?;

        Ok(TipInfoResponse {
            metadata: Some(metadata),
            is_synced,
        })
    }

    /// Fetch the header at `height`, converted to the RPC model.
    /// Returns `Error::HeaderNotFound` if the height is not in the db.
    async fn get_header_by_height(&self, height: u64) -> Result<models::BlockHeader, Self::Error> {
        let result = self
            .db
            .fetch_header(height)
            .await?
            .ok_or(Error::HeaderNotFound { height })?
            .into();
        Ok(result)
    }

    /// Binary-search headers by timestamp: return the height of the last block
    /// whose timestamp is at or before `epoch_time` (0 if before the chain start).
    /// NOTE(review): correctness relies on header timestamps being non-decreasing
    /// along the chain — confirm against consensus rules.
    async fn get_height_at_time(&self, epoch_time: u64) -> Result<u64, Self::Error> {
        trace!(target: LOG_TARGET, "requested_epoch_time: {}", epoch_time);
        let tip_header = self.db.fetch_tip_header().await?;

        let mut left_height = 0u64;
        let mut right_height = tip_header.height();

        while left_height <= right_height {
            let mut mid_height = (left_height + right_height) / 2;

            // Midpoint 0 means the requested time predates block 1.
            if mid_height == 0 {
                return Ok(0u64);
            }
            // If the two bounds are adjacent then perform the test between the right and left sides
            if left_height == mid_height {
                mid_height = right_height;
            }

            let mid_header = self
                .db
                .fetch_header(mid_height)
                .await?
                .ok_or_else(|| Error::HeaderNotFound { height: mid_height })?;
            let before_mid_header = self
                .db
                .fetch_header(mid_height - 1)
                .await?
                .ok_or_else(|| Error::HeaderNotFound { height: mid_height - 1 })?;
            trace!(
                target: LOG_TARGET,
                "requested_epoch_time: {}, left: {}, mid: {}/{} ({}/{}), right: {}",
                epoch_time,
                left_height,
                mid_height,
                mid_height-1,
                mid_header.timestamp.as_u64(),
                before_mid_header.timestamp.as_u64(),
                right_height
            );
            // Exact bracket found: epoch_time lies between mid-1 and mid.
            if epoch_time < mid_header.timestamp.as_u64() && epoch_time >= before_mid_header.timestamp.as_u64() {
                trace!(
                    target: LOG_TARGET,
                    "requested_epoch_time: {}, selected height: {}",
                    epoch_time, before_mid_header.height
                );
                return Ok(before_mid_header.height);
            } else if mid_height == right_height {
                trace!(
                    target: LOG_TARGET,
                    "requested_epoch_time: {epoch_time}, selected height: {right_height}"
                );
                return Ok(right_height);
            } else if epoch_time <= mid_header.timestamp.as_u64() {
                right_height = mid_height;
            } else {
                left_height = mid_height;
            }
        }

        Ok(0u64)
    }

    /// Locate a transaction by kernel excess signature (block first, then mempool).
    /// Delegates to `fetch_kernel` after converting the wire-format signature.
    async fn transaction_query(
        &self,
        signature: crate::base_node::rpc::models::Signature,
    ) -> Result<TxQueryResponse, Self::Error> {
        let signature = signature.try_into().map_err(Error::SignatureConversion)?;

        let response = self.fetch_kernel(signature).await?;

        Ok(response)
    }

    /// v0 wire format of the paginated UTXO sync; delegates to `fetch_utxos`.
    async fn sync_utxos_by_block_v0(
        &self,
        request: SyncUtxosByBlockRequest,
    ) -> Result<SyncUtxosByBlockResponseV0, Self::Error> {
        self.fetch_utxos(request).await
    }

    /// v1 wire format; same data as v0, converted via `From`.
    async fn sync_utxos_by_block_v1(
        &self,
        request: SyncUtxosByBlockRequest,
    ) -> Result<SyncUtxosByBlockResponseV1, Self::Error> {
        let v1 = self.fetch_utxos(request).await?;
        Ok(v1.into())
    }

    /// All outputs mined in a single block; delegates to `fetch_utxos_by_block`.
    async fn get_utxos_by_block(
        &self,
        request: GetUtxosByBlockRequest,
    ) -> Result<GetUtxosByBlockResponse, Self::Error> {
        self.fetch_utxos_by_block(request).await
    }

    /// For each requested output hash, report where it was mined (hash, height,
    /// timestamp). Hashes with no matching output are silently omitted.
    async fn get_utxos_mined_info(
        &self,
        request: models::GetUtxosMinedInfoRequest,
    ) -> Result<models::GetUtxosMinedInfoResponse, Self::Error> {
        request.validate()?;

        let mut utxos = vec![];

        let tip_header = self.db().fetch_tip_header().await?;
        for hash in request.hashes {
            let hash = hash.try_into()?;
            let output = self.db().fetch_output(hash).await?;
            if let Some(output) = output {
                utxos.push(models::MinedUtxoInfo {
                    utxo_hash: hash.to_vec(),
                    mined_in_hash: output.header_hash.to_vec(),
                    mined_in_height: output.mined_height,
                    mined_in_timestamp: output.mined_timestamp,
                });
            }
        }

        Ok(models::GetUtxosMinedInfoResponse {
            utxos,
            best_block_hash: tip_header.hash().to_vec(),
            best_block_height: tip_header.height(),
        })
    }

    /// For each requested output hash, report whether/where it was mined and
    /// whether/where it was spent. Unlike `get_utxos_mined_info`, unknown hashes
    /// produce an entry with both headers set to `None`.
    /// Fails fast with `HeaderHashNotFound` if `must_include_header` is unknown.
    async fn get_utxos_deleted_info(
        &self,
        request: models::GetUtxosDeletedInfoRequest,
    ) -> Result<models::GetUtxosDeletedInfoResponse, Self::Error> {
        request.validate()?;

        let mut utxos = vec![];

        let must_include_header = request.must_include_header.clone().try_into()?;
        if self
            .db()
            .fetch_header_by_block_hash(must_include_header)
            .await?
            .is_none()
        {
            return Err(Error::HeaderHashNotFound);
        }

        let tip_header = self.db().fetch_tip_header().await?;
        for hash in request.hashes {
            let hash = hash.try_into()?;
            let output = self.db().fetch_output(hash).await?;

            if let Some(output) = output {
                // is it still unspent?
                let input = self.db().fetch_input(hash).await?;
                if let Some(i) = input {
                    utxos.push(models::DeletedUtxoInfo {
                        utxo_hash: hash.to_vec(),
                        found_in_header: Some((output.mined_height, output.header_hash.to_vec())),
                        spent_in_header: Some((i.spent_height, i.header_hash.to_vec())),
                    });
                } else {
                    utxos.push(models::DeletedUtxoInfo {
                        utxo_hash: hash.to_vec(),
                        found_in_header: Some((output.mined_height, output.header_hash.to_vec())),
                        spent_in_header: None,
                    });
                }
            } else {
                utxos.push(models::DeletedUtxoInfo {
                    utxo_hash: hash.to_vec(),
                    found_in_header: None,
                    spent_in_header: None,
                });
            }
        }

        Ok(models::GetUtxosDeletedInfoResponse {
            utxos,
            best_block_hash: tip_header.hash().to_vec(),
            best_block_height: tip_header.height(),
        })
    }

    /// Build a Merkle inclusion proof for the kernel with the given excess sig.
    /// The proof is bincode-serialized for transport.
    async fn generate_kernel_merkle_proof(
        &self,
        excess_sig: types::CompressedSignature,
    ) -> Result<GenerateKernelMerkleProofResponse, Self::Error> {
        let proof = self.db().generate_kernel_merkle_proof(excess_sig).await?;
        Ok(GenerateKernelMerkleProofResponse {
            encoded_merkle_proof: bincode::serialize(&proof.merkle_proof).map_err(Error::general)?,
            block_hash: proof.block_hash,
            leaf_index: proof.leaf_index.value() as u64,
        })
    }

    /// Fetch a single output by hash at the current tip.
    /// Returns `Error::OutputNotFound` when the hash is unknown; the `Option` in
    /// the return type is never `None` on the success path as written.
    async fn get_utxo(&self, request: models::GetUtxoRequest) -> Result<Option<TransactionOutput>, Self::Error> {
        let hash: FixedHash = request.output_hash.try_into().map_err(Error::general)?;
        let outputs = self.db().fetch_outputs_with_spend_status_at_tip(vec![hash]).await?;
        let output = match outputs.first() {
            Some(Some((output, _spent))) => Some(output.clone()),
            _ => return Err(Error::OutputNotFound),
        };
        Ok(output)
    }

    /// Fee-per-gram statistics from the mempool for up to 20 recent blocks.
    /// Rejects `count > 20` with a general error.
    async fn get_mempool_fee_per_gram_stats(&self, count: usize) -> Result<Vec<models::FeePerGramStat>, Self::Error> {
        if count > 20 {
            return Err(Error::general(anyhow::anyhow!(
                "count must be less than or equal to 20"
            )));
        }

        let metadata = self.db.get_chain_metadata().await?;
        let stats = self
            .mempool()
            .get_fee_per_gram_stats(count, metadata.best_block_height())
            .await
            .map_err(Error::general)?;

        Ok(stats)
    }
}
633

634
#[cfg(test)]
635
mod tests {
636
    #![allow(clippy::indexing_slicing)]
637
    use tari_common::configuration::Network;
638
    use tari_shutdown::Shutdown;
639

640
    use super::*;
641
    use crate::test_helpers::blockchain::create_new_blockchain_with_network;
642
    fn make_state_machine_handle() -> StateMachineHandle {
5✔
643
        use tokio::sync::{broadcast, watch};
644
        let (state_tx, _state_rx) = broadcast::channel(10);
5✔
645
        let (_status_tx, status_rx) =
5✔
646
            watch::channel(crate::base_node::state_machine_service::states::StatusInfo::new());
5✔
647
        let shutdown = Shutdown::new();
5✔
648
        StateMachineHandle::new(state_tx, status_rx, shutdown.to_signal())
5✔
649
    }
5✔
650

651
    fn make_mempool_handle() -> MempoolHandle {
5✔
652
        use crate::mempool::test_utils::mock::create_mempool_service_mock;
653
        let (handle, _state) = create_mempool_service_mock();
5✔
654
        handle
5✔
655
    }
5✔
656

657
    async fn make_service() -> Service<crate::test_helpers::blockchain::TempDatabase> {
2✔
658
        let db = create_new_blockchain_with_network(Network::LocalNet);
2✔
659
        let adb = AsyncBlockchainDb::from(db);
2✔
660
        let state_machine = make_state_machine_handle();
2✔
661
        let mempool = make_mempool_handle();
2✔
662
        Service::new(adb, state_machine, mempool)
2✔
663
    }
2✔
664

665
    #[tokio::test]
666
    async fn fetch_utxos_start_header_not_found() {
1✔
667
        let service = make_service().await;
1✔
668
        let req = SyncUtxosByBlockRequest {
1✔
669
            start_header_hash: vec![0xAB; 32],
1✔
670
            limit: 4,
1✔
671
            page: 0,
1✔
672
            exclude_spent: false,
1✔
673
            exclude_inputs: false,
1✔
674
            version: 0,
1✔
675
        };
1✔
676
        let err = service.fetch_utxos(req).await.unwrap_err();
1✔
677
        match err {
1✔
678
            Error::StartHeaderHashNotFound => {},
1✔
679
            other => panic!("unexpected error: {other:?}"),
1✔
680
        }
1✔
681
    }
1✔
682

683
    #[tokio::test]
684
    async fn fetch_utxos_header_height_mismatch() {
1✔
685
        let service = make_service().await;
1✔
686
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
1✔
687
        // page * limit moves start height beyond tip (0)
688
        let req = SyncUtxosByBlockRequest {
1✔
689
            start_header_hash: genesis.hash().to_vec(),
1✔
690
            limit: 1,
1✔
691
            page: 1,
1✔
692
            exclude_spent: false,
1✔
693
            exclude_inputs: false,
1✔
694
            version: 0,
1✔
695
        };
1✔
696
        let err = service.fetch_utxos(req).await.unwrap_err();
1✔
697
        match err {
1✔
698
            Error::HeaderHeightMismatch { .. } => {},
1✔
699
            other => panic!("unexpected error: {other:?}"),
1✔
700
        }
1✔
701
    }
1✔
702

703
    #[tokio::test]
704
    async fn fetch_utxos_paginates_results() {
1✔
705
        use crate::test_helpers::blockchain::create_main_chain;
706

707
        // Build a small chain: GB -> A -> B -> C
708
        let db = create_new_blockchain_with_network(Network::LocalNet);
1✔
709
        let (_names, chain) = create_main_chain(&db, block_specs!(["A->GB"], ["B->A"], ["C->B"], ["D->C"]));
1✔
710

711
        // Construct the service over this DB
712
        let adb = AsyncBlockchainDb::from(db);
1✔
713
        let state_machine = make_state_machine_handle();
1✔
714
        let mempool = make_mempool_handle();
1✔
715
        let service = Service::new(adb, state_machine, mempool);
1✔
716

717
        // Use genesis as the start header hash
718
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
1✔
719
        let g_hash = genesis.hash().to_vec();
1✔
720

721
        // Page 0, limit 1: should return only A (height 1) and next header should be B
722
        // genesis block in local net has 0 inputs and outputs
723
        let resp0 = service
1✔
724
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
725
                start_header_hash: g_hash.clone(),
1✔
726
                limit: 1,
1✔
727
                page: 0,
1✔
728
                exclude_spent: false,
1✔
729
                exclude_inputs: false,
1✔
730
                version: 0,
1✔
731
            })
1✔
732
            .await
1✔
733
            .expect("fetch_utxos page 0 should succeed");
1✔
734

735
        assert_eq!(resp0.blocks.len(), 1, "expected exactly one block in first page");
1✔
736
        assert_eq!(resp0.blocks[0].height, 0, "first page should start at genesis height");
1✔
737

738
        // Expect next_header_to_scan to be the hash of block A (height 1)
739
        let a_hash = chain.get("A").unwrap().hash().to_vec();
1✔
740
        assert_eq!(resp0.next_header_to_scan, a_hash, "next header should point to A");
1✔
741

742
        // Page 1, limit 1: should return only block A (height 1) and next header should be B
743
        let resp1 = service
1✔
744
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
745
                start_header_hash: g_hash.clone(),
1✔
746
                limit: 1,
1✔
747
                page: 1,
1✔
748
                exclude_spent: false,
1✔
749
                exclude_inputs: false,
1✔
750
                version: 0,
1✔
751
            })
1✔
752
            .await
1✔
753
            .expect("fetch_utxos page 1 should succeed");
1✔
754

755
        assert_eq!(resp1.blocks.len(), 1, "expected exactly one block in second page");
1✔
756
        assert_eq!(resp1.blocks[0].height, 1, "second page should start at height 1 (A)");
1✔
757

758
        // Expect next_header_to_scan to be the hash of block B (height 2)
759
        let b_hash = chain.get("B").unwrap().hash().to_vec();
1✔
760
        assert_eq!(resp1.next_header_to_scan, b_hash, "next header should point to B");
1✔
761

762
        // Page 2, limit 1: should return only block B (height 2) and next header should be C
763
        let resp2 = service
1✔
764
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
765
                start_header_hash: g_hash.clone(),
1✔
766
                limit: 1,
1✔
767
                page: 2,
1✔
768
                exclude_spent: false,
1✔
769
                exclude_inputs: false,
1✔
770
                version: 0,
1✔
771
            })
1✔
772
            .await
1✔
773
            .expect("fetch_utxos page 2 should succeed");
1✔
774

775
        assert_eq!(resp2.blocks.len(), 1, "expected exactly one block in third page");
1✔
776
        assert_eq!(resp2.blocks[0].height, 2, "third page should start at height 2 (B)");
1✔
777
        let c_hash = chain.get("C").unwrap().hash().to_vec();
1✔
778
        assert_eq!(resp2.next_header_to_scan, c_hash, "next header should point to C");
1✔
779

780
        // Page 3, limit 1: should return only block C (height 3) and next header should be empty (tip reached)
781
        let resp3 = service
1✔
782
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
783
                start_header_hash: g_hash.clone(),
1✔
784
                limit: 1,
1✔
785
                page: 3,
1✔
786
                exclude_spent: false,
1✔
787
                exclude_inputs: false,
1✔
788
                version: 0,
1✔
789
            })
1✔
790
            .await
1✔
791
            .expect("fetch_utxos page 3 should succeed");
1✔
792

793
        assert_eq!(resp3.blocks.len(), 1, "expected exactly one block in fourth page");
1✔
794
        assert_eq!(resp3.blocks[0].height, 3, "fourth page should start at height 3 (C)");
1✔
795
        let d_hash = chain.get("D").unwrap().hash().to_vec();
1✔
796
        assert_eq!(resp3.next_header_to_scan, d_hash, "next header should point to D");
1✔
797

798
        // Page 4, limit 1: should return only block C (height 3) and next header should be empty (tip reached)
799
        let resp4 = service
1✔
800
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
801
                start_header_hash: g_hash.clone(),
1✔
802
                limit: 1,
1✔
803
                page: 4,
1✔
804
                exclude_spent: false,
1✔
805
                exclude_inputs: false,
1✔
806
                version: 0,
1✔
807
            })
1✔
808
            .await
1✔
809
            .expect("fetch_utxos page 3 should succeed");
1✔
810

811
        assert_eq!(resp4.blocks.len(), 1, "expected exactly one block in fourth page");
1✔
812
        assert_eq!(resp4.blocks[0].height, 4, "fourth page should start at height 4 (D)");
1✔
813
        assert!(resp4.next_header_to_scan.is_empty(), "no next header at tip");
1✔
814
        let resp5 = service
1✔
815
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
816
                start_header_hash: g_hash,
1✔
817
                limit: 2,
1✔
818
                page: 0,
1✔
819
                exclude_spent: false,
1✔
820
                exclude_inputs: false,
1✔
821
                version: 0,
1✔
822
            })
1✔
823
            .await
1✔
824
            .expect("fetch_utxos should succeed");
1✔
825

826
        assert_eq!(resp5.blocks.len(), 2, "expected 2 blocks");
1✔
827
        assert_eq!(resp5.blocks[0].height, 0, "Should be block (GB)");
1✔
828
        assert_eq!(resp5.blocks[1].height, 1, "Should be block (A)");
1✔
829
        let b_hash = chain.get("B").unwrap().hash().to_vec();
1✔
830
        assert_eq!(resp5.next_header_to_scan, b_hash, "next header should point to B");
1✔
831
        assert!(resp5.has_next_page, "Should have more pages");
1✔
832
    }
1✔
833

834
    #[tokio::test]
835
    async fn large_fetch_utxo_paginates_results() {
1✔
836
        use crate::test_helpers::blockchain::create_main_chain;
837

838
        // Build a small chain: GB -> A -> B -> C
839
        let db = create_new_blockchain_with_network(Network::LocalNet);
1✔
840
        let (_names, chain) = create_main_chain(
1✔
841
            &db,
1✔
842
            block_specs!(
1✔
843
                ["1->GB"],
1✔
844
                ["2->1"],
1✔
845
                ["3->2"],
1✔
846
                ["4->3"],
1✔
847
                ["5->4"],
1✔
848
                ["6->5"],
1✔
849
                ["7->6"],
1✔
850
                ["8->7"],
1✔
851
                ["9->8"],
1✔
852
                ["10->9"],
1✔
853
                ["11->10"],
1✔
854
                ["12->11"]
1✔
855
            ),
1✔
856
        );
1✔
857

858
        // Construct the service over this DB
859
        let adb = AsyncBlockchainDb::from(db);
1✔
860
        let state_machine = make_state_machine_handle();
1✔
861
        let mempool = make_mempool_handle();
1✔
862
        let service = Service::new(adb, state_machine, mempool);
1✔
863

864
        // Use genesis as the start header hash
865
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
1✔
866
        let g_hash = genesis.hash().to_vec();
1✔
867

868
        let resp = service
1✔
869
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
870
                start_header_hash: g_hash.clone(),
1✔
871
                limit: 10,
1✔
872
                page: 0,
1✔
873
                exclude_spent: false,
1✔
874
                exclude_inputs: false,
1✔
875
                version: 0,
1✔
876
            })
1✔
877
            .await
1✔
878
            .expect("fetch_utxos should succeed");
1✔
879

880
        assert_eq!(resp.blocks.len(), 10, "expected 10 blocks");
1✔
881
        assert_eq!(resp.blocks[0].height, 0, "Should be block (0)");
1✔
882
        assert_eq!(resp.blocks[1].height, 1, "Should be block (1)");
1✔
883
        assert_eq!(resp.blocks[2].height, 2, "Should be block (2)");
1✔
884
        assert_eq!(resp.blocks[3].height, 3, "Should be block (3)");
1✔
885
        assert_eq!(resp.blocks[4].height, 4, "Should be block (4)");
1✔
886
        assert_eq!(resp.blocks[5].height, 5, "Should be block (5)");
1✔
887
        assert_eq!(resp.blocks[6].height, 6, "Should be block (6)");
1✔
888
        assert_eq!(resp.blocks[7].height, 7, "Should be block (7)");
1✔
889
        assert_eq!(resp.blocks[8].height, 8, "Should be block (8)");
1✔
890
        assert_eq!(resp.blocks[9].height, 9, "Should be block (9)");
1✔
891
        let next_hash = chain.get("10").unwrap().hash().to_vec();
1✔
892
        assert_eq!(resp.next_header_to_scan, next_hash, "next header should point to 10");
1✔
893
        assert!(resp.has_next_page, "Should have more pages");
1✔
894
        let resp = service
1✔
895
            .fetch_utxos(SyncUtxosByBlockRequest {
1✔
896
                start_header_hash: g_hash,
1✔
897
                limit: 10,
1✔
898
                page: 1,
1✔
899
                exclude_spent: false,
1✔
900
                exclude_inputs: false,
1✔
901
                version: 0,
1✔
902
            })
1✔
903
            .await
1✔
904
            .expect("fetch_utxos should succeed");
1✔
905
        assert_eq!(resp.blocks.len(), 3, "expected 3 blocks");
1✔
906
        assert_eq!(resp.blocks[0].height, 10, "Should be block (10)");
1✔
907
        assert_eq!(resp.blocks[1].height, 11, "Should be block (11)");
1✔
908
        assert_eq!(resp.blocks[2].height, 12, "Should be block (12)");
1✔
909
        assert!(resp.next_header_to_scan.is_empty(), "Should be empty");
1✔
910
        assert!(!resp.has_next_page, "Should not have more pages");
1✔
911
    }
1✔
912

913
    // this will only run and work in esmeralda
    #[cfg(tari_target_network_testnet)]
    #[tokio::test]
    async fn large_utxo_handled_correctly() {
        use crate::test_helpers::blockchain::create_main_chain;

        // Build a 10-block chain on top of the Esmeralda genesis block.
        let db = create_new_blockchain_with_network(Network::Esmeralda);
        let (_names, _chain) = create_main_chain(
            &db,
            block_specs!(
                ["1->GB"],
                ["2->1"],
                ["3->2"],
                ["4->3"],
                ["5->4"],
                ["6->5"],
                ["7->6"],
                ["8->7"],
                ["9->8"],
                ["10->9"],
            ),
        );

        // Wire up a service, then shrink the chunk size so large UTXO data must be split.
        let mut service = Service::new(
            AsyncBlockchainDb::from(db),
            make_state_machine_handle(),
            make_mempool_handle(),
        );
        service.max_utxo_chunk_size = 500; // set small chunk size for testing

        // Scanning starts from the genesis header hash.
        let genesis = service.db().fetch_header(0).await.unwrap().unwrap();
        let g_hash = genesis.hash().to_vec();

        let resp = service
            .fetch_utxos(SyncUtxosByBlockRequest {
                start_header_hash: g_hash.clone(),
                limit: 5,
                page: 0,
                exclude_spent: false,
                exclude_inputs: false,
                version: 0,
            })
            .await
            .expect("fetch_utxos should succeed");

        // Genesis appears twice (two chunked entries at height 0), so the five
        // entries cover heights 0, 0, 1, 2 and 3.
        assert_eq!(resp.blocks.len(), 5, "expected 5 blocks");
        assert_eq!(resp.blocks[0].height, 0, "Should be block (0)");
        assert_eq!(resp.blocks[1].height, 0, "Should be block (0)");
        assert_eq!(resp.blocks[2].height, 1, "Should be block (1)");
        assert_eq!(resp.blocks[3].height, 2, "Should be block (2)");
        assert_eq!(resp.blocks[4].height, 3, "Should be block (3)");
        let header_4 = service.db().fetch_header(4).await.unwrap().unwrap();
        assert_eq!(
            header_4.hash().to_vec(),
            resp.next_header_to_scan,
            "next header should point to 4"
        );
        assert!(!resp.has_next_page, "Should have no more pages");
    }

}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc