tari-project / tari · build 17099998871

20 Aug 2025 01:34PM UTC coverage: 55.122% (+0.6%) from 54.492%

Build type: push · CI service: github · Committer: web-flow

feat: add cli payref search for minotari node (#7422)

Description
---
Added a payref search option for the CLI.

## Summary by CodeRabbit

- New Features
  - Added a CLI command to search mined info by hex-encoded payment reference; it displays the output commitment and mined details, or a clear "not found" message (see the illustrative sketch below).

- Bug Fixes
  - Payment-reference calculations now return empty lists for unconfirmed transactions, to avoid reporting incorrect references until the transaction is confirmed.

- Style
  - Clarified UTXO fetch log messages and standardized mined/spent timestamps to RFC 2822 for human-friendly display.
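The following is a minimal, self-contained sketch of the two user-visible behaviours described above: decoding a hex-encoded payment reference and rendering mined/spent timestamps as RFC 2822. It is not the actual minotari CLI code; the helper names, the 32-byte payref length, and the use of the `chrono` crate are illustrative assumptions.

```rust
// Illustrative sketch only; the real command lives in the minotari node CLI.
// Assumes a 32-byte payment reference and the `chrono` crate for RFC 2822 output.
use chrono::{TimeZone, Utc};

/// Decode a hex-encoded payment reference into a fixed 32-byte array.
fn parse_payref(hex_str: &str) -> Result<[u8; 32], String> {
    let trimmed = hex_str.trim();
    if trimmed.len() != 64 {
        return Err(format!("expected 64 hex characters, got {}", trimmed.len()));
    }
    let mut bytes = [0u8; 32];
    for (i, chunk) in trimmed.as_bytes().chunks(2).enumerate() {
        let pair = std::str::from_utf8(chunk).map_err(|e| e.to_string())?;
        bytes[i] = u8::from_str_radix(pair, 16).map_err(|e| e.to_string())?;
    }
    Ok(bytes)
}

/// Render a unix timestamp (seconds) as an RFC 2822 string for display.
fn format_mined_timestamp(unix_secs: i64) -> String {
    Utc.timestamp_opt(unix_secs, 0)
        .single()
        .map(|dt| dt.to_rfc2822())
        .unwrap_or_else(|| "invalid timestamp".to_string())
}

fn main() {
    let payref = parse_payref("aa".repeat(32).as_str()).expect("valid hex");
    println!("payref parsed, {} bytes", payref.len());
    println!("mined at: {}", format_mined_timestamp(1_755_696_840));
}
```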

0 of 21 new or added lines in 3 files covered. (0.0%)

196 existing lines in 8 files now uncovered.

76920 of 139544 relevant lines covered (55.12%)

195664.92 hits per line
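(The headline percentage is simply covered lines divided by relevant lines: 76,920 / 139,544 ≈ 0.55122, i.e. 55.12%.)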

Source File

/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs — coverage: 0.0%

// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#[cfg(feature = "metrics")]
use std::convert::{TryFrom, TryInto};
use std::{cmp::max, collections::HashSet, sync::Arc, time::Instant};

use log::*;
use strum_macros::Display;
use tari_common_types::types::{BlockHash, FixedHash, HashOutput};
use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId};
use tari_utilities::hex::Hex;
use tokio::sync::RwLock;

#[cfg(feature = "metrics")]
use crate::base_node::metrics;
use crate::{
    base_node::comms_interface::{
        comms_response::ValidatorNodeChange,
        error::CommsInterfaceError,
        local_interface::BlockEventSender,
        FetchMempoolTransactionsResponse,
        NodeCommsRequest,
        NodeCommsResponse,
        OutboundNodeCommsInterface,
    },
    blocks::{Block, BlockBuilder, BlockHeader, BlockHeaderValidationError, ChainBlock, NewBlock, NewBlockTemplate},
    chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError},
    consensus::{ConsensusConstants, ConsensusManager},
    mempool::Mempool,
    proof_of_work::{
        cuckaroo_pow::cuckaroo_difficulty,
        monero_randomx_difficulty,
        randomx_factory::RandomXFactory,
        sha3x_difficulty,
        tari_randomx_difficulty,
        Difficulty,
        PowAlgorithm,
        PowError,
    },
    transactions::aggregated_body::AggregateBody,
    validation::{helpers, tari_rx_vm_key_height, ValidationError},
};

const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler";
const MAX_REQUEST_BY_BLOCK_HASHES: usize = 100;
const MAX_REQUEST_BY_KERNEL_EXCESS_SIGS: usize = 100;
const MAX_REQUEST_BY_UTXO_HASHES: usize = 100;
const MAX_MEMPOOL_TIMEOUT: u64 = 150;

/// Events that can be published on the Validated Block Event Stream
/// Broadcast is to notify subscribers if this is a valid propagated block event
#[derive(Debug, Clone, Display)]
pub enum BlockEvent {
    ValidBlockAdded(Arc<Block>, BlockAddResult),
    AddBlockValidationFailed {
        block: Arc<Block>,
        source_peer: Option<NodeId>,
    },
    AddBlockErrored {
        block: Arc<Block>,
    },
    BlockSyncComplete(Arc<ChainBlock>, u64),
    BlockSyncRewind(Vec<Arc<ChainBlock>>),
}

/// The InboundNodeCommsInterface is used to handle all received inbound requests from remote nodes.
pub struct InboundNodeCommsHandlers<B> {
    block_event_sender: BlockEventSender,
    blockchain_db: AsyncBlockchainDb<B>,
    mempool: Mempool,
    consensus_manager: ConsensusManager,
    list_of_reconciling_blocks: Arc<RwLock<HashSet<HashOutput>>>,
    outbound_nci: OutboundNodeCommsInterface,
    connectivity: ConnectivityRequester,
    randomx_factory: RandomXFactory,
}

impl<B> InboundNodeCommsHandlers<B>
where B: BlockchainBackend + 'static
{
    /// Construct a new InboundNodeCommsInterface.
    pub fn new(
        block_event_sender: BlockEventSender,
        blockchain_db: AsyncBlockchainDb<B>,
        mempool: Mempool,
        consensus_manager: ConsensusManager,
        outbound_nci: OutboundNodeCommsInterface,
        connectivity: ConnectivityRequester,
        randomx_factory: RandomXFactory,
    ) -> Self {
        Self {
            block_event_sender,
            blockchain_db,
            mempool,
            consensus_manager,
            list_of_reconciling_blocks: Arc::new(RwLock::new(HashSet::new())),
            outbound_nci,
            connectivity,
            randomx_factory,
        }
    }

    /// Handle inbound node comms requests from remote nodes and local services.
    #[allow(clippy::too_many_lines)]
    pub async fn handle_request(&self, request: NodeCommsRequest) -> Result<NodeCommsResponse, CommsInterfaceError> {
        trace!(target: LOG_TARGET, "Handling remote request {request}");
        match request {
            NodeCommsRequest::GetChainMetadata => Ok(NodeCommsResponse::ChainMetadata(
                self.blockchain_db.get_chain_metadata().await?,
            )),
            NodeCommsRequest::GetTargetDifficultyNextBlock(algo) => {
                let header = self.blockchain_db.fetch_tip_header().await?;
                let constants = self.consensus_manager.consensus_constants(header.header().height);
                let target_difficulty = self
                    .get_target_difficulty_for_next_block(algo, constants, *header.hash())
                    .await?;
                Ok(NodeCommsResponse::TargetDifficulty(target_difficulty))
            },
            NodeCommsRequest::FetchHeaders(range) => {
                let headers = self.blockchain_db.fetch_chain_headers(range).await?;
                Ok(NodeCommsResponse::BlockHeaders(headers))
            },
            NodeCommsRequest::FetchHeadersByHashes(block_hashes) => {
                if block_hashes.len() > MAX_REQUEST_BY_BLOCK_HASHES {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchHeadersByHashes",
                        details: format!(
                            "Exceeded maximum block hashes request (max: {}, got:{})",
                            MAX_REQUEST_BY_BLOCK_HASHES,
                            block_hashes.len()
                        ),
                    });
                }
                let mut block_headers = Vec::with_capacity(block_hashes.len());
                for block_hash in block_hashes {
                    let block_hex = block_hash.to_hex();
                    match self.blockchain_db.fetch_chain_header_by_block_hash(block_hash).await? {
                        Some(block_header) => {
                            block_headers.push(block_header);
                        },
                        None => {
                            error!(target: LOG_TARGET, "Could not fetch headers with hashes:{block_hex}");
                            return Err(CommsInterfaceError::InternalError(format!(
                                "Could not fetch headers with hashes:{block_hex}"
                            )));
                        },
                    }
                }
                Ok(NodeCommsResponse::BlockHeaders(block_headers))
            },
            NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => {
                let mut res = Vec::with_capacity(utxo_hashes.len());
                for (output, spent) in (self
                    .blockchain_db
                    .fetch_outputs_with_spend_status_at_tip(utxo_hashes)
                    .await?)
                    .into_iter()
                    .flatten()
                {
                    if !spent {
                        res.push(output);
                    }
                }
                Ok(NodeCommsResponse::TransactionOutputs(res))
            },
            NodeCommsRequest::FetchMatchingBlocks { range, compact } => {
                let blocks = self.blockchain_db.fetch_blocks(range, compact).await?;
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::FetchBlocksByKernelExcessSigs(excess_sigs) => {
                if excess_sigs.len() > MAX_REQUEST_BY_KERNEL_EXCESS_SIGS {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchBlocksByKernelExcessSigs",
                        details: format!(
                            "Exceeded maximum number of kernel excess sigs in request (max: {}, got:{})",
                            MAX_REQUEST_BY_KERNEL_EXCESS_SIGS,
                            excess_sigs.len()
                        ),
                    });
                }
                let mut blocks = Vec::with_capacity(excess_sigs.len());
                for sig in excess_sigs {
                    let sig_hex = sig.get_signature().to_hex();
                    debug!(
                        target: LOG_TARGET,
                        "A peer has requested a block with kernel with sig {sig_hex}"
                    );
                    match self.blockchain_db.fetch_block_with_kernel(sig).await {
                        Ok(Some(block)) => blocks.push(block),
                        Ok(None) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block containing kernel with sig {sig_hex} to peer because not \
                             stored"
                        ),
                        Err(e) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block containing kernel with sig {sig_hex} to peer because: {e}"
                        ),
                    }
                }
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::FetchBlocksByUtxos(commitments) => {
                if commitments.len() > MAX_REQUEST_BY_UTXO_HASHES {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchBlocksByUtxos",
                        details: format!(
                            "Exceeded maximum number of utxo hashes in request (max: {}, got:{})",
                            MAX_REQUEST_BY_UTXO_HASHES,
                            commitments.len()
                        ),
                    });
                }
                let mut blocks = Vec::with_capacity(commitments.len());
                for commitment in commitments {
                    let commitment_hex = commitment.to_hex();
                    debug!(
                        target: LOG_TARGET,
                        "A peer has requested a block with commitment {commitment_hex}",
                    );
                    match self.blockchain_db.fetch_block_with_utxo(commitment).await {
                        Ok(Some(block)) => blocks.push(block),
                        Ok(None) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block with commitment {commitment_hex} because not stored"
                        ),
                        Err(e) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block with commitment {commitment_hex} because: {e}"
                        ),
                    }
                }
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::GetHeaderByHash(hash) => {
                let header = self.blockchain_db.fetch_chain_header_by_block_hash(hash).await?;
                Ok(NodeCommsResponse::BlockHeader(header))
            },
            NodeCommsRequest::GetBlockByHash(hash) => {
                let block = self.blockchain_db.fetch_block_by_hash(hash, false).await?;
                Ok(NodeCommsResponse::HistoricalBlock(Box::new(block)))
            },
            NodeCommsRequest::GetNewBlockTemplate(request) => {
                let best_block_header = self.blockchain_db.fetch_tip_header().await?;
                let mut last_seen_hash = self.mempool.get_last_seen_hash().await?;
                let mut is_mempool_synced = false;
                let start = Instant::now();
                // this will wait a max of 150ms by default before returning anyway with a potential broken template
                // We need to ensure the mempool has seen the latest base node height before we can be confident the
                // template is correct
                while !is_mempool_synced && start.elapsed().as_millis() < MAX_MEMPOOL_TIMEOUT.into() {
                    if best_block_header.hash() == &last_seen_hash || last_seen_hash == FixedHash::default() {
                        is_mempool_synced = true;
                    } else {
                        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
                        last_seen_hash = self.mempool.get_last_seen_hash().await?;
                    }
                }

                if !is_mempool_synced {
                    warn!(
                        target: LOG_TARGET,
                        "Mempool out of sync - last seen hash '{}' does not match the tip hash '{}'. This condition \
                         should auto correct with the next block template request",
                        last_seen_hash, best_block_header.hash()
                    );
                }
                let mut header = BlockHeader::from_previous(best_block_header.header());
                let constants = self.consensus_manager.consensus_constants(header.height);
                header.version = constants.blockchain_version().into();
                header.pow.pow_algo = request.algo;

                let constants_weight = constants.max_block_transaction_weight();
                let asking_weight = if request.max_weight > constants_weight || request.max_weight == 0 {
                    constants_weight
                } else {
                    request.max_weight
                };

                debug!(
                    target: LOG_TARGET,
                    "Fetching transactions with a maximum weight of {asking_weight} for the template"
                );
                let transactions = self
                    .mempool
                    .retrieve(asking_weight)
                    .await?
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect::<Vec<_>>();

                debug!(
                    target: LOG_TARGET,
                    "Adding {} transaction(s) to new block template",
                    transactions.len(),
                );

                let prev_hash = header.prev_hash;
                let height = header.height;

                let block = header.into_builder().with_transactions(transactions).build();
                let block_hash = block.hash();
                let block_template = NewBlockTemplate::from_block(
                    block,
                    self.get_target_difficulty_for_next_block(request.algo, constants, prev_hash)
                        .await?,
                    self.consensus_manager.get_block_reward_at(height),
                    is_mempool_synced,
                )?;

                debug!(target: LOG_TARGET,
                    "New block template requested and prepared at height: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
                    block_template.header.height,
                    block_template.target_difficulty,
                    block_hash.to_hex(),
                    block_template
                        .body
                        .calculate_weight(constants.transaction_weight_params())
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
                    block_template.body.to_counts_string()
                );

                Ok(NodeCommsResponse::NewBlockTemplate(block_template))
            },
            NodeCommsRequest::GetNewBlock(block_template) => {
                let height = block_template.header.height;
                let target_difficulty = block_template.target_difficulty;
                let block = self.blockchain_db.prepare_new_block(block_template).await?;
                let constants = self.consensus_manager.consensus_constants(block.header.height);
                debug!(target: LOG_TARGET,
                    "Prepared block: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
                    height,
                    target_difficulty,
                    block.hash().to_hex(),
                    block
                        .body
                        .calculate_weight(constants.transaction_weight_params())
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
                    block.body.to_counts_string()
                );
                Ok(NodeCommsResponse::NewBlock {
                    success: true,
                    error: None,
                    block: Some(block),
                })
            },
            NodeCommsRequest::GetBlockFromAllChains(hash) => {
                let block_hex = hash.to_hex();
                debug!(
                    target: LOG_TARGET,
                    "A peer has requested a block with hash {block_hex}"
                );

                #[allow(clippy::blocks_in_conditions)]
                let maybe_block = match self
                    .blockchain_db
                    .fetch_block_by_hash(hash, true)
                    .await
                    .unwrap_or_else(|e| {
                        warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block {block_hex} to peer because: {e}",
                        );

                        None
                    }) {
                    None => self.blockchain_db.fetch_orphan(hash).await.map_or_else(
                        |e| {
                            warn!(
                                target: LOG_TARGET,
                                "Could not provide requested block {block_hex} to peer because: {e}"
                            );

                            None
                        },
                        Some,
                    ),
                    Some(block) => Some(block.into_block()),
                };

                Ok(NodeCommsResponse::Block(Box::new(maybe_block)))
            },
            NodeCommsRequest::FetchKernelByExcessSig(signature) => {
                let kernels = match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await {
                    Ok(Some((kernel, _))) => vec![kernel],
                    Ok(None) => vec![],
                    Err(err) => {
                        error!(target: LOG_TARGET, "Could not fetch kernel {err}");
                        return Err(err.into());
                    },
                };

                Ok(NodeCommsResponse::TransactionKernels(kernels))
            },
            NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs } => {
                let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
                Ok(NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
                    FetchMempoolTransactionsResponse {
                        transactions,
                        not_found,
                    },
                ))
            },
            NodeCommsRequest::FetchValidatorNodesKeys {
                height,
                validator_network,
            } => {
                let active_validator_nodes = self
                    .blockchain_db
                    .fetch_active_validator_nodes(height, validator_network)
                    .await?;
                Ok(NodeCommsResponse::FetchValidatorNodesKeysResponse(
                    active_validator_nodes,
                ))
            },
            NodeCommsRequest::GetValidatorNode {
                sidechain_id,
                public_key,
            } => {
                let vn = self.blockchain_db.get_validator_node(sidechain_id, public_key).await?;
                Ok(NodeCommsResponse::GetValidatorNode(vn))
            },
            NodeCommsRequest::FetchTemplateRegistrations {
                start_height,
                end_height,
            } => {
                let template_registrations = self
                    .blockchain_db
                    .fetch_template_registrations(start_height..=end_height)
                    .await?;
                Ok(NodeCommsResponse::FetchTemplateRegistrationsResponse(
                    template_registrations,
                ))
            },
            NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash } => {
                let utxos = self.blockchain_db.fetch_outputs_in_block(block_hash).await?;
                Ok(NodeCommsResponse::TransactionOutputs(utxos))
            },
            NodeCommsRequest::FetchMinedInfoByPayRef(payref) => {
                let output_info = self.blockchain_db.fetch_mined_info_by_payref(payref).await?;
                Ok(NodeCommsResponse::MinedInfo(output_info))
            },
            NodeCommsRequest::FetchMinedInfoByOutputHash(output_hash) => {
                let output_info = self.blockchain_db.fetch_mined_info_by_output_hash(output_hash).await?;
                Ok(NodeCommsResponse::MinedInfo(output_info))
            },
            NodeCommsRequest::FetchOutputMinedInfo(output_hash) => {
                let output_info = self.blockchain_db.fetch_output(output_hash).await?;
                Ok(NodeCommsResponse::OutputMinedInfo(output_info))
            },
            NodeCommsRequest::CheckOutputSpentStatus(output_hash) => {
                let input_info = self.blockchain_db.fetch_input(output_hash).await?;
                Ok(NodeCommsResponse::InputMinedInfo(input_info))
            },
            NodeCommsRequest::FetchValidatorNodeChanges { epoch, sidechain_id } => {
                let added_validators = self
                    .blockchain_db
                    .fetch_validators_activating_in_epoch(sidechain_id.clone(), epoch)
                    .await?;

                let exit_validators = self
                    .blockchain_db
                    .fetch_validators_exiting_in_epoch(sidechain_id.clone(), epoch)
                    .await?;

                info!(
                    target: LOG_TARGET,
                    "Fetched {} validators activating and {} validators exiting in epoch {}",
                    added_validators.len(),
                    exit_validators.len(),
                    epoch,
                );

                let mut node_changes = Vec::with_capacity(added_validators.len() + exit_validators.len());

                node_changes.extend(added_validators.into_iter().map(|vn| ValidatorNodeChange::Add {
                    registration: vn.original_registration.into(),
                    activation_epoch: vn.activation_epoch,
                    minimum_value_promise: vn.minimum_value_promise,
                    shard_key: vn.shard_key,
                }));

                node_changes.extend(exit_validators.into_iter().map(|vn| ValidatorNodeChange::Remove {
                    public_key: vn.public_key,
                }));

                Ok(NodeCommsResponse::FetchValidatorNodeChangesResponse(node_changes))
            },
        }
    }

    /// Handles a `NewBlock` message. Only a single `NewBlock` message can be handled at once to prevent extraneous
    /// requests for the full block.
    /// This may (asynchronously) block until the other request(s) complete or time out and so should typically be
    /// executed in a dedicated task.
    pub async fn handle_new_block_message(
        &mut self,
        new_block: NewBlock,
        source_peer: NodeId,
    ) -> Result<(), CommsInterfaceError> {
        let block_hash = new_block.header.hash();

        if self.blockchain_db.inner().is_add_block_disabled() {
            info!(
                target: LOG_TARGET,
                "Ignoring block message ({}) because add_block is locked",
                block_hash.to_hex()
            );
            return Ok(());
        }

        // Lets check if the block exists before we try and ask for a complete block
        if self.check_exists_and_not_bad_block(block_hash).await? {
            return Ok(());
        }

        // lets check that the difficulty at least matches 50% of the tip header. The max difficulty drop is 16%, thus
        // 50% is way more than that and in order to attack the node, you need 50% of the mining power. We cannot check
        // the target difficulty as orphan blocks dont have a target difficulty. All we care here is that bad
        // blocks are not free to make, and that they are more expensive to make then they are to validate. As
        // soon as a block can be linked to the main chain, a proper full proof of work check will
        // be done before any other validation.
        self.check_min_block_difficulty(&new_block).await?;

        {
            // we use a double lock to make sure we can only reconcile one unique block at a time. We may receive the
            // same block from multiple peer near simultaneously. We should only reconcile each unique block once.
            let read_lock = self.list_of_reconciling_blocks.read().await;
            if read_lock.contains(&block_hash) {
                debug!(
                    target: LOG_TARGET,
                    "Block with hash `{}` is already being reconciled",
                    block_hash.to_hex()
                );
                return Ok(());
            }
        }
        {
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
            if self.check_exists_and_not_bad_block(block_hash).await? {
                return Ok(());
            }

            if !write_lock.insert(block_hash) {
                debug!(
                    target: LOG_TARGET,
                    "Block with hash `{}` is already being reconciled",
                    block_hash.to_hex()
                );
                return Ok(());
            }
        }

        debug!(
            target: LOG_TARGET,
            "Block with hash `{}` is unknown. Constructing block from known mempool transactions / requesting missing \
             transactions from peer '{}'.",
            block_hash.to_hex(),
            source_peer
        );

        let result = self.reconcile_and_add_block(source_peer.clone(), new_block).await;

        {
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
            write_lock.remove(&block_hash);
        }
        result?;
        Ok(())
    }

    async fn check_min_block_difficulty(&self, new_block: &NewBlock) -> Result<(), CommsInterfaceError> {
        let constants = self.consensus_manager.consensus_constants(new_block.header.height);
        let gen_hash = *self.consensus_manager.get_genesis_block().hash();
        let mut min_difficulty = constants.min_pow_difficulty(new_block.header.pow.pow_algo);
        let mut header = self.blockchain_db.fetch_last_chain_header().await?;
        loop {
            if new_block.header.pow_algo() == header.header().pow_algo() {
                min_difficulty = max(
                    header
                        .accumulated_data()
                        .target_difficulty
                        .checked_div_u64(2)
                        .unwrap_or(min_difficulty),
                    min_difficulty,
                );
                break;
            }
            if header.height() == 0 {
                break;
            }
            // we have not reached gen block, and the pow algo does not match, so lets go further back
            header = self
                .blockchain_db
                .fetch_chain_header(header.height().saturating_sub(1))
                .await?;
        }
        let achieved = match new_block.header.pow_algo() {
            PowAlgorithm::RandomXM => monero_randomx_difficulty(
                &new_block.header,
                &self.randomx_factory,
                &gen_hash,
                &self.consensus_manager,
            )?,
            PowAlgorithm::Sha3x => sha3x_difficulty(&new_block.header)?,
            PowAlgorithm::RandomXT => {
                let vm_key = *self
                    .blockchain_db
                    .fetch_chain_header(tari_rx_vm_key_height(header.height()))
                    .await?
                    .hash();
                tari_randomx_difficulty(&new_block.header, &self.randomx_factory, &vm_key)?
            },
            PowAlgorithm::Cuckaroo => {
                let constants = self.consensus_manager.consensus_constants(new_block.header.height);
                let cuckaroo_cycle = constants.cuckaroo_cycle_length();
                let edge_bits = constants.cuckaroo_edge_bits();
                cuckaroo_difficulty(&new_block.header, cuckaroo_cycle, edge_bits)?
            },
        };
        if achieved < min_difficulty {
            return Err(CommsInterfaceError::InvalidBlockHeader(
                BlockHeaderValidationError::ProofOfWorkError(PowError::AchievedDifficultyBelowMin),
            ));
        }
        Ok(())
    }

    async fn check_exists_and_not_bad_block(&self, block: FixedHash) -> Result<bool, CommsInterfaceError> {
        if self.blockchain_db.chain_header_or_orphan_exists(block).await? {
            debug!(
                target: LOG_TARGET,
                "Block with hash `{}` already stored",
                block.to_hex()
            );
            return Ok(true);
        }
        let (is_bad_block, reason) = self.blockchain_db.bad_block_exists(block).await?;
        if is_bad_block {
            debug!(
                target: LOG_TARGET,
                "Block with hash `{}` already validated as a bad block due to `{}`",
                block.to_hex(), reason
            );
            return Err(CommsInterfaceError::ChainStorageError(
                ChainStorageError::ValidationError {
                    source: ValidationError::BadBlockFound {
                        hash: block.to_hex(),
                        reason,
                    },
                },
            ));
        }
        Ok(false)
    }

    async fn reconcile_and_add_block(
        &mut self,
        source_peer: NodeId,
        new_block: NewBlock,
    ) -> Result<(), CommsInterfaceError> {
        let block = self.reconcile_block(source_peer.clone(), new_block).await?;
        self.handle_block(block, Some(source_peer)).await?;
        Ok(())
    }

    #[allow(clippy::too_many_lines)]
    async fn reconcile_block(
        &mut self,
        source_peer: NodeId,
        new_block: NewBlock,
    ) -> Result<Block, CommsInterfaceError> {
        let NewBlock {
            header,
            coinbase_kernels,
            coinbase_outputs,
            kernel_excess_sigs: excess_sigs,
        } = new_block;
        // If the block is empty, we dont have to ask for the block, as we already have the full block available
        // to us.
        if excess_sigs.is_empty() {
            let block = BlockBuilder::new(header.version)
                .add_outputs(coinbase_outputs)
                .add_kernels(coinbase_kernels)
                .with_header(header)
                .build();
            return Ok(block);
        }

        let block_hash = header.hash();
        // We check the current tip and orphan status of the block because we cannot guarantee that mempool state is
        // correct and the mmr root calculation is only valid if the block is building on the tip.
        let current_meta = self.blockchain_db.get_chain_metadata().await?;
        if header.prev_hash != *current_meta.best_block_hash() {
            debug!(
                target: LOG_TARGET,
                "Orphaned block #{}: ({}), current tip is: #{} ({}). We need to fetch the complete block from peer: \
                 ({})",
                header.height,
                block_hash.to_hex(),
                current_meta.best_block_height(),
                current_meta.best_block_hash().to_hex(),
                source_peer,
            );
            #[allow(clippy::cast_possible_wrap)]
            #[cfg(feature = "metrics")]
            metrics::compact_block_tx_misses(header.height).set(excess_sigs.len() as i64);
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        // We know that the block is neither and orphan or a coinbase, so lets ask our mempool for the transactions
        let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
        let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect();

        #[allow(clippy::cast_possible_wrap)]
        #[cfg(feature = "metrics")]
        metrics::compact_block_tx_misses(header.height).set(missing_excess_sigs.len() as i64);

        let mut builder = BlockBuilder::new(header.version)
            .add_outputs(coinbase_outputs)
            .add_kernels(coinbase_kernels)
            .with_transactions(known_transactions);

        if missing_excess_sigs.is_empty() {
            debug!(
                target: LOG_TARGET,
                "All transactions for block #{} ({}) found in mempool",
                header.height,
                block_hash.to_hex()
            );
        } else {
            debug!(
                target: LOG_TARGET,
                "Requesting {} unknown transaction(s) from peer '{}'.",
                missing_excess_sigs.len(),
                source_peer
            );

            let FetchMempoolTransactionsResponse {
                transactions,
                not_found,
            } = self
                .outbound_nci
                .request_transactions_by_excess_sig(source_peer.clone(), missing_excess_sigs)
                .await?;

            // Add returned transactions to unconfirmed pool
            if !transactions.is_empty() {
                self.mempool.insert_all(transactions.clone()).await?;
            }

            if !not_found.is_empty() {
                warn!(
                    target: LOG_TARGET,
                    "Peer {} was not able to return all transactions for block #{} ({}). {} transaction(s) not found. \
                     Requesting full block.",
                    source_peer,
                    header.height,
                    block_hash.to_hex(),
                    not_found.len()
                );

                #[cfg(feature = "metrics")]
                metrics::compact_block_full_misses(header.height).inc();
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            }

            builder = builder.with_transactions(
                transactions
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect(),
            );
        }

        // NB: Add the header last because `with_transactions` etc updates the current header, but we have the final one
        // already
        builder = builder.with_header(header.clone());
        let block = builder.build();

        // Perform a sanity check on the reconstructed block, if the MMR roots don't match then it's possible one or
        // more transactions in our mempool had the same excess/signature for a *different* transaction.
        // This is extremely unlikely, but still possible. In case of a mismatch, request the full block from the peer.
        let (block, mmr_roots) = match self.blockchain_db.calculate_mmr_roots(block).await {
            Err(_) => {
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            },
            Ok(v) => v,
        };
        if let Err(e) = helpers::check_mmr_roots(&header, &mmr_roots) {
            warn!(
                target: LOG_TARGET,
                "Reconstructed block #{} ({}) failed MMR check validation!. Requesting full block. Error: {}",
                header.height,
                block_hash.to_hex(),
                e,
            );

            #[cfg(feature = "metrics")]
            metrics::compact_block_mmr_mismatch(header.height).inc();
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        Ok(block)
    }

    async fn request_full_block_from_peer(
        &mut self,
        source_peer: NodeId,
        block_hash: BlockHash,
    ) -> Result<Block, CommsInterfaceError> {
        match self
            .outbound_nci
            .request_blocks_by_hashes_from_peer(block_hash, Some(source_peer.clone()))
            .await
        {
            Ok(Some(block)) => Ok(block),
            Ok(None) => {
                debug!(
                    target: LOG_TARGET,
                    "Peer `{source_peer}` failed to return the block that was requested."
                );
                Err(CommsInterfaceError::InvalidPeerResponse(format!(
                    "Invalid response from peer `{source_peer}`: Peer failed to provide the block that was propagated"
                )))
            },
            Err(CommsInterfaceError::UnexpectedApiResponse) => {
                debug!(
                    target: LOG_TARGET,
                    "Peer `{source_peer}` sent unexpected API response."
                );
                Err(CommsInterfaceError::UnexpectedApiResponse)
            },
            Err(e) => Err(e),
        }
    }

    /// Handle inbound blocks from remote nodes and local services.
    ///
    /// ## Arguments
    /// block - the block to store
    /// new_block_msg - propagate this new block message
    /// source_peer - the peer that sent this new block message, or None if the block was generated by a local miner
    pub async fn handle_block(
        &mut self,
        block: Block,
        source_peer: Option<NodeId>,
    ) -> Result<BlockHash, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;

        info!(
            target: LOG_TARGET,
            "Block #{} ({}) received from {}",
            block_height,
            block_hash.to_hex(),
            source_peer
                .as_ref()
                .map(|p| format!("remote peer: {p}"))
                .unwrap_or_else(|| "local services".to_string())
        );
        debug!(target: LOG_TARGET, "Incoming block: {block}");
        let timer = Instant::now();
        let block = self.hydrate_block(block).await?;

        let add_block_result = self.blockchain_db.add_block(block.clone()).await;
        // Create block event on block event stream
        match add_block_result {
            Ok(block_add_result) => {
                debug!(
                    target: LOG_TARGET,
                    "Block #{} ({}) added ({}) to blockchain in {:.2?}",
                    block_height,
                    block_hash.to_hex(),
                    block_add_result,
                    timer.elapsed()
                );

                let should_propagate = match &block_add_result {
                    BlockAddResult::Ok(_) => true,
                    BlockAddResult::BlockExists => false,
                    BlockAddResult::OrphanBlock => false,
                    BlockAddResult::ChainReorg { .. } => true,
                };

                #[cfg(feature = "metrics")]
                self.update_block_result_metrics(&block_add_result).await?;

                self.publish_block_event(BlockEvent::ValidBlockAdded(block.clone(), block_add_result));

                if should_propagate {
                    debug!(
                        target: LOG_TARGET,
                        "Propagate block ({}) to network.",
                        block_hash.to_hex()
                    );
                    let exclude_peers = source_peer.into_iter().collect();
                    let new_block_msg = NewBlock::from(&*block);
                    if let Err(e) = self.outbound_nci.propagate_block(new_block_msg, exclude_peers).await {
                        warn!(
                            target: LOG_TARGET,
                            "Failed to propagate block ({}) to network: {}.",
                            block_hash.to_hex(), e
                        );
                    }
                }
                Ok(block_hash)
            },

            Err(e @ ChainStorageError::ValidationError { .. }) => {
                #[cfg(feature = "metrics")]
                {
                    let block_hash = block.hash();
                    metrics::rejected_blocks(block.header.height, &block_hash).inc();
                }
                warn!(
                    target: LOG_TARGET,
                    "Peer {} sent an invalid block: {}",
                    source_peer
                        .as_ref()
                        .map(ToString::to_string)
                        .unwrap_or_else(|| "<local request>".to_string()),
                    e
                );
                self.publish_block_event(BlockEvent::AddBlockValidationFailed { block, source_peer });
                Err(e.into())
            },

            Err(e) => {
                #[cfg(feature = "metrics")]
                metrics::rejected_blocks(block.header.height, &block.hash()).inc();

                self.publish_block_event(BlockEvent::AddBlockErrored { block });
                Err(e.into())
            },
        }
    }

    async fn hydrate_block(&mut self, block: Block) -> Result<Arc<Block>, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;
        if block.body.inputs().is_empty() {
            debug!(
                target: LOG_TARGET,
                "Block #{} ({}) contains no inputs so nothing to hydrate",
                block_height,
                block_hash.to_hex(),
            );
            return Ok(Arc::new(block));
        }

        let timer = Instant::now();
        let (header, mut inputs, outputs, kernels) = block.dissolve();

        let db = self.blockchain_db.inner().db_read_access()?;
        for input in &mut inputs {
            if !input.is_compact() {
                continue;
            }

            let output_mined_info =
                db.fetch_output(&input.output_hash())?
                    .ok_or_else(|| CommsInterfaceError::InvalidFullBlock {
                        hash: block_hash,
                        details: format!("Output {} to be spent does not exist in db", input.output_hash()),
                    })?;

            input.add_output_data(output_mined_info.output);
        }
        debug!(
            target: LOG_TARGET,
            "Hydrated block #{} ({}) with {} input(s) in {:.2?}",
            block_height,
            block_hash.to_hex(),
            inputs.len(),
            timer.elapsed()
        );
        let block = Block::new(header, AggregateBody::new(inputs, outputs, kernels));
        Ok(Arc::new(block))
    }

    fn publish_block_event(&self, event: BlockEvent) {
        if let Err(event) = self.block_event_sender.send(Arc::new(event)) {
            debug!(target: LOG_TARGET, "No event subscribers. Event {} dropped.", event.0)
        }
    }

    #[cfg(feature = "metrics")]
    async fn update_block_result_metrics(&self, block_add_result: &BlockAddResult) -> Result<(), CommsInterfaceError> {
        fn update_target_difficulty(block: &ChainBlock) {
            match block.header().pow_algo() {
                PowAlgorithm::Sha3x => {
                    metrics::target_difficulty_sha()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomXM => {
                    metrics::target_difficulty_monero_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomXT => {
                    metrics::target_difficulty_tari_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::Cuckaroo => {
                    metrics::target_difficulty_cuckaroo()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
            }
        }

        match block_add_result {
            BlockAddResult::Ok(ref block) => {
                update_target_difficulty(block);
                #[allow(clippy::cast_possible_wrap)]
                metrics::tip_height().set(block.height() as i64);
                let utxo_set_size = self.blockchain_db.utxo_count().await?;
                metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
            },
            BlockAddResult::ChainReorg { added, removed } => {
                if let Some(fork_height) = added.last().map(|b| b.height()) {
                    #[allow(clippy::cast_possible_wrap)]
                    metrics::tip_height().set(fork_height as i64);
                    metrics::reorg(fork_height, added.len(), removed.len()).inc();

                    let utxo_set_size = self.blockchain_db.utxo_count().await?;
                    metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
                }
                for block in added {
                    update_target_difficulty(block);
                }
            },
            BlockAddResult::OrphanBlock => {
                metrics::orphaned_blocks().inc();
            },
            _ => {},
        }
        Ok(())
    }

    async fn get_target_difficulty_for_next_block(
        &self,
        pow_algo: PowAlgorithm,
        constants: &ConsensusConstants,
        current_block_hash: HashOutput,
    ) -> Result<Difficulty, CommsInterfaceError> {
        let target_difficulty = self
            .blockchain_db
            .fetch_target_difficulty_for_next_block(pow_algo, current_block_hash)
            .await?;

        let target = target_difficulty.calculate(
            constants.min_pow_difficulty(pow_algo),
            constants.max_pow_difficulty(pow_algo),
        );
        trace!(target: LOG_TARGET, "Target difficulty {target} for PoW {pow_algo}");
        Ok(target)
    }

    pub async fn get_last_seen_hash(&self) -> Result<FixedHash, CommsInterfaceError> {
        self.mempool.get_last_seen_hash().await.map_err(|e| e.into())
    }
}

impl<B> Clone for InboundNodeCommsHandlers<B> {
    fn clone(&self) -> Self {
        Self {
            block_event_sender: self.block_event_sender.clone(),
            blockchain_db: self.blockchain_db.clone(),
            mempool: self.mempool.clone(),
            consensus_manager: self.consensus_manager.clone(),
            list_of_reconciling_blocks: self.list_of_reconciling_blocks.clone(),
            outbound_nci: self.outbound_nci.clone(),
            connectivity: self.connectivity.clone(),
            randomx_factory: self.randomx_factory.clone(),
        }
    }
}
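
The `handle_new_block_message` implementation above guards reconciliation with a read-lock check followed by a write-lock re-check and insert, so each unique block hash is only reconciled once even when several peers announce it almost simultaneously. Below is a self-contained sketch of that double-checked locking pattern using plain `tokio` primitives; it is an isolated illustration, not code from the Tari crates.

```rust
use std::{collections::HashSet, sync::Arc};
use tokio::sync::RwLock;

type BlockHash = [u8; 32];

/// Returns true if the caller won the right to reconcile `hash`,
/// false if another task is already reconciling it.
async fn try_start_reconcile(in_flight: &Arc<RwLock<HashSet<BlockHash>>>, hash: BlockHash) -> bool {
    {
        // Fast path: a shared read lock is enough to reject most duplicates.
        let read = in_flight.read().await;
        if read.contains(&hash) {
            return false;
        }
    }
    // Slow path: re-check under the exclusive lock before inserting, because another
    // task may have inserted the same hash between the read lock being dropped and
    // this write lock being acquired.
    let mut write = in_flight.write().await;
    write.insert(hash)
}

#[tokio::main]
async fn main() {
    let in_flight = Arc::new(RwLock::new(HashSet::new()));
    let hash = [0u8; 32];
    assert!(try_start_reconcile(&in_flight, hash).await);
    assert!(!try_start_reconcile(&in_flight, hash).await);
    // Once reconciliation finishes, the hash is removed so the block can be handled again if needed.
    in_flight.write().await.remove(&hash);
    println!("double-checked lock sketch ran OK");
}
```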