tari-project / tari / 17275382059

27 Aug 2025 06:28PM UTC · coverage: 60.14% (down from 60.274%) · push · github · web-flow

chore: new release v5.0.0-pre.8 (#7446)

Description
---
new release

71505 of 118897 relevant lines covered (60.14%)

536444.51 hits per line

Source File — 0.0% covered
/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#[cfg(feature = "metrics")]
use std::convert::{TryFrom, TryInto};
use std::{cmp::max, collections::HashSet, sync::Arc, time::Instant};

use log::*;
use strum_macros::Display;
use tari_common_types::types::{BlockHash, FixedHash, HashOutput};
use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId};
use tari_node_components::blocks::{
    Block,
    BlockBuilder,
    BlockHeader,
    BlockHeaderValidationError,
    NewBlock,
    NewBlockTemplate,
};
use tari_transaction_components::{
    aggregated_body::AggregateBody,
    consensus::ConsensusConstants,
    tari_proof_of_work::{Difficulty, PowAlgorithm, PowError},
};
use tari_utilities::hex::Hex;
use tokio::sync::RwLock;

#[cfg(feature = "metrics")]
use crate::base_node::metrics;
use crate::{
    base_node::comms_interface::{
        comms_response::ValidatorNodeChange,
        error::CommsInterfaceError,
        local_interface::BlockEventSender,
        FetchMempoolTransactionsResponse,
        NodeCommsRequest,
        NodeCommsResponse,
        OutboundNodeCommsInterface,
    },
    blocks::ChainBlock,
    chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError},
    consensus::BaseNodeConsensusManager,
    mempool::Mempool,
    proof_of_work::{
        cuckaroo_pow::cuckaroo_difficulty,
        monero_randomx_difficulty,
        randomx_factory::RandomXFactory,
        sha3x_difficulty,
        tari_randomx_difficulty,
    },
    validation::{helpers, tari_rx_vm_key_height, ValidationError},
};
const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler";
const MAX_REQUEST_BY_BLOCK_HASHES: usize = 100;
const MAX_REQUEST_BY_KERNEL_EXCESS_SIGS: usize = 100;
const MAX_REQUEST_BY_UTXO_HASHES: usize = 100;
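// Maximum time, in milliseconds, to wait for the mempool to have seen the current tip before a new block
// template is built anyway (see the GetNewBlockTemplate handler below).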
const MAX_MEMPOOL_TIMEOUT: u64 = 150;

/// Events that can be published on the Validated Block Event Stream
/// Broadcast is to notify subscribers if this is a valid propagated block event
#[derive(Debug, Clone, Display)]
pub enum BlockEvent {
    ValidBlockAdded(Arc<Block>, BlockAddResult),
    AddBlockValidationFailed {
        block: Arc<Block>,
        source_peer: Option<NodeId>,
    },
    AddBlockErrored {
        block: Arc<Block>,
    },
    BlockSyncComplete(Arc<ChainBlock>, u64),
    BlockSyncRewind(Vec<Arc<ChainBlock>>),
}

/// The `InboundNodeCommsHandlers` is used to handle all received inbound requests from remote nodes.
pub struct InboundNodeCommsHandlers<B> {
    block_event_sender: BlockEventSender,
    blockchain_db: AsyncBlockchainDb<B>,
    mempool: Mempool,
    consensus_manager: BaseNodeConsensusManager,
    list_of_reconciling_blocks: Arc<RwLock<HashSet<HashOutput>>>,
    outbound_nci: OutboundNodeCommsInterface,
    connectivity: ConnectivityRequester,
    randomx_factory: RandomXFactory,
}

impl<B> InboundNodeCommsHandlers<B>
where B: BlockchainBackend + 'static
{
    /// Construct a new InboundNodeCommsHandlers.
    pub fn new(
        block_event_sender: BlockEventSender,
        blockchain_db: AsyncBlockchainDb<B>,
        mempool: Mempool,
        consensus_manager: BaseNodeConsensusManager,
        outbound_nci: OutboundNodeCommsInterface,
        connectivity: ConnectivityRequester,
        randomx_factory: RandomXFactory,
    ) -> Self {
        Self {
            block_event_sender,
            blockchain_db,
            mempool,
            consensus_manager,
            list_of_reconciling_blocks: Arc::new(RwLock::new(HashSet::new())),
            outbound_nci,
            connectivity,
            randomx_factory,
        }
    }

    /// Handle inbound node comms requests from remote nodes and local services.
    #[allow(clippy::too_many_lines)]
    pub async fn handle_request(&self, request: NodeCommsRequest) -> Result<NodeCommsResponse, CommsInterfaceError> {
        trace!(target: LOG_TARGET, "Handling remote request {request}");
        match request {
            NodeCommsRequest::GetChainMetadata => Ok(NodeCommsResponse::ChainMetadata(
                self.blockchain_db.get_chain_metadata().await?,
            )),
            NodeCommsRequest::GetTargetDifficultyNextBlock(algo) => {
                let header = self.blockchain_db.fetch_tip_header().await?;
                let constants = self.consensus_manager.consensus_constants(header.header().height);
                let target_difficulty = self
                    .get_target_difficulty_for_next_block(algo, constants, *header.hash())
                    .await?;
                Ok(NodeCommsResponse::TargetDifficulty(target_difficulty))
            },
            NodeCommsRequest::FetchHeaders(range) => {
                let headers = self.blockchain_db.fetch_chain_headers(range).await?;
                Ok(NodeCommsResponse::BlockHeaders(headers))
            },
            NodeCommsRequest::FetchHeadersByHashes(block_hashes) => {
                if block_hashes.len() > MAX_REQUEST_BY_BLOCK_HASHES {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchHeadersByHashes",
                        details: format!(
                            "Exceeded maximum block hashes request (max: {}, got:{})",
                            MAX_REQUEST_BY_BLOCK_HASHES,
                            block_hashes.len()
                        ),
                    });
                }
                let mut block_headers = Vec::with_capacity(block_hashes.len());
                for block_hash in block_hashes {
                    let block_hex = block_hash.to_hex();
                    match self.blockchain_db.fetch_chain_header_by_block_hash(block_hash).await? {
                        Some(block_header) => {
                            block_headers.push(block_header);
                        },
                        None => {
                            error!(target: LOG_TARGET, "Could not fetch headers with hashes:{block_hex}");
                            return Err(CommsInterfaceError::InternalError(format!(
                                "Could not fetch headers with hashes:{block_hex}"
                            )));
                        },
                    }
                }
                Ok(NodeCommsResponse::BlockHeaders(block_headers))
            },
            NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => {
                let mut res = Vec::with_capacity(utxo_hashes.len());
                for (output, spent) in (self
                    .blockchain_db
                    .fetch_outputs_with_spend_status_at_tip(utxo_hashes)
                    .await?)
                    .into_iter()
                    .flatten()
                {
                    if !spent {
                        res.push(output);
                    }
                }
                Ok(NodeCommsResponse::TransactionOutputs(res))
            },
            NodeCommsRequest::FetchMatchingBlocks { range, compact } => {
                let blocks = self.blockchain_db.fetch_blocks(range, compact).await?;
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::FetchBlocksByKernelExcessSigs(excess_sigs) => {
                if excess_sigs.len() > MAX_REQUEST_BY_KERNEL_EXCESS_SIGS {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchBlocksByKernelExcessSigs",
                        details: format!(
                            "Exceeded maximum number of kernel excess sigs in request (max: {}, got:{})",
                            MAX_REQUEST_BY_KERNEL_EXCESS_SIGS,
                            excess_sigs.len()
                        ),
                    });
                }
                let mut blocks = Vec::with_capacity(excess_sigs.len());
                for sig in excess_sigs {
                    let sig_hex = sig.get_signature().to_hex();
                    debug!(
                        target: LOG_TARGET,
                        "A peer has requested a block with kernel with sig {sig_hex}"
                    );
                    match self.blockchain_db.fetch_block_with_kernel(sig).await {
                        Ok(Some(block)) => blocks.push(block),
                        Ok(None) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block containing kernel with sig {sig_hex} to peer because not \
                             stored"
                        ),
                        Err(e) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block containing kernel with sig {sig_hex} to peer because: {e}"
                        ),
                    }
                }
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::FetchBlocksByUtxos(commitments) => {
                if commitments.len() > MAX_REQUEST_BY_UTXO_HASHES {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchBlocksByUtxos",
                        details: format!(
                            "Exceeded maximum number of utxo hashes in request (max: {}, got:{})",
                            MAX_REQUEST_BY_UTXO_HASHES,
                            commitments.len()
                        ),
                    });
                }
                let mut blocks = Vec::with_capacity(commitments.len());
                for commitment in commitments {
                    let commitment_hex = commitment.to_hex();
                    debug!(
                        target: LOG_TARGET,
                        "A peer has requested a block with commitment {commitment_hex}",
                    );
                    match self.blockchain_db.fetch_block_with_utxo(commitment).await {
                        Ok(Some(block)) => blocks.push(block),
                        Ok(None) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block with commitment {commitment_hex} because not stored"
                        ),
                        Err(e) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block with commitment {commitment_hex} because: {e}"
                        ),
                    }
                }
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::GetHeaderByHash(hash) => {
                let header = self.blockchain_db.fetch_chain_header_by_block_hash(hash).await?;
                Ok(NodeCommsResponse::BlockHeader(header))
            },
            NodeCommsRequest::GetBlockByHash(hash) => {
                let block = self.blockchain_db.fetch_block_by_hash(hash, false).await?;
                Ok(NodeCommsResponse::HistoricalBlock(Box::new(block)))
            },
            NodeCommsRequest::GetNewBlockTemplate(request) => {
                let best_block_header = self.blockchain_db.fetch_tip_header().await?;
                let mut last_seen_hash = self.mempool.get_last_seen_hash().await?;
                let mut is_mempool_synced = false;
                let start = Instant::now();
                // this will wait a max of 150ms by default before returning anyway with a potentially broken template
                // We need to ensure the mempool has seen the latest base node height before we can be confident the
                // template is correct
                while !is_mempool_synced && start.elapsed().as_millis() < MAX_MEMPOOL_TIMEOUT.into() {
                    if best_block_header.hash() == &last_seen_hash || last_seen_hash == FixedHash::default() {
                        is_mempool_synced = true;
                    } else {
                        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
                        last_seen_hash = self.mempool.get_last_seen_hash().await?;
                    }
                }

                if !is_mempool_synced {
                    warn!(
                        target: LOG_TARGET,
                        "Mempool out of sync - last seen hash '{}' does not match the tip hash '{}'. This condition \
                         should auto correct with the next block template request",
                        last_seen_hash, best_block_header.hash()
                    );
                }
                let mut header = BlockHeader::from_previous(best_block_header.header());
                let constants = self.consensus_manager.consensus_constants(header.height);
                header.version = constants.blockchain_version().into();
                header.pow.pow_algo = request.algo;

                let constants_weight = constants.max_block_transaction_weight();
                let asking_weight = if request.max_weight > constants_weight || request.max_weight == 0 {
                    constants_weight
                } else {
                    request.max_weight
                };

                debug!(
                    target: LOG_TARGET,
                    "Fetching transactions with a maximum weight of {asking_weight} for the template"
                );
                let transactions = self
                    .mempool
                    .retrieve(asking_weight)
                    .await?
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect::<Vec<_>>();

                debug!(
                    target: LOG_TARGET,
                    "Adding {} transaction(s) to new block template",
                    transactions.len(),
                );

                let prev_hash = header.prev_hash;
                let height = header.height;

                let block = header.into_builder().with_transactions(transactions).build();
                let block_hash = block.hash();
                let block_template = NewBlockTemplate::from_block(
                    block,
                    self.get_target_difficulty_for_next_block(request.algo, constants, prev_hash)
                        .await?,
                    self.consensus_manager.get_block_reward_at(height),
                    is_mempool_synced,
                )?;

                debug!(target: LOG_TARGET,
                    "New block template requested and prepared at height: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
                    block_template.header.height,
                    block_template.target_difficulty,
                    block_hash.to_hex(),
                    block_template
                        .body
                        .calculate_weight(constants.transaction_weight_params())
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
                    block_template.body.to_counts_string()
                );

                Ok(NodeCommsResponse::NewBlockTemplate(block_template))
            },
            NodeCommsRequest::GetNewBlock(block_template) => {
                let height = block_template.header.height;
                let target_difficulty = block_template.target_difficulty;
                let block = self.blockchain_db.prepare_new_block(block_template).await?;
                let constants = self.consensus_manager.consensus_constants(block.header.height);
                debug!(target: LOG_TARGET,
                    "Prepared block: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
                    height,
                    target_difficulty,
                    block.hash().to_hex(),
                    block
                        .body
                        .calculate_weight(constants.transaction_weight_params())
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
                    block.body.to_counts_string()
                );
                Ok(NodeCommsResponse::NewBlock {
                    success: true,
                    error: None,
                    block: Some(block),
                })
            },
            NodeCommsRequest::GetBlockFromAllChains(hash) => {
                let block_hex = hash.to_hex();
                debug!(
                    target: LOG_TARGET,
                    "A peer has requested a block with hash {block_hex}"
                );

                #[allow(clippy::blocks_in_conditions)]
                let maybe_block = match self
                    .blockchain_db
                    .fetch_block_by_hash(hash, true)
                    .await
                    .unwrap_or_else(|e| {
                        warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block {block_hex} to peer because: {e}",
                        );

                        None
                    }) {
                    None => self.blockchain_db.fetch_orphan(hash).await.map_or_else(
                        |e| {
                            warn!(
                                target: LOG_TARGET,
                                "Could not provide requested block {block_hex} to peer because: {e}"
                            );

                            None
                        },
                        Some,
                    ),
                    Some(block) => Some(block.into_block()),
                };

                Ok(NodeCommsResponse::Block(Box::new(maybe_block)))
            },
            NodeCommsRequest::FetchKernelByExcessSig(signature) => {
                let kernels = match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await {
                    Ok(Some((kernel, _))) => vec![kernel],
                    Ok(None) => vec![],
                    Err(err) => {
                        error!(target: LOG_TARGET, "Could not fetch kernel {err}");
                        return Err(err.into());
                    },
                };

                Ok(NodeCommsResponse::TransactionKernels(kernels))
            },
            NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs } => {
                let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
                Ok(NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
                    FetchMempoolTransactionsResponse {
                        transactions,
                        not_found,
                    },
                ))
            },
            NodeCommsRequest::FetchValidatorNodesKeys {
                height,
                validator_network,
            } => {
                let active_validator_nodes = self
                    .blockchain_db
                    .fetch_active_validator_nodes(height, validator_network)
                    .await?;
                Ok(NodeCommsResponse::FetchValidatorNodesKeysResponse(
                    active_validator_nodes,
                ))
            },
            NodeCommsRequest::GetValidatorNode {
                sidechain_id,
                public_key,
            } => {
                let vn = self.blockchain_db.get_validator_node(sidechain_id, public_key).await?;
                Ok(NodeCommsResponse::GetValidatorNode(vn))
            },
            NodeCommsRequest::FetchTemplateRegistrations {
                start_height,
                end_height,
            } => {
                let template_registrations = self
                    .blockchain_db
                    .fetch_template_registrations(start_height..=end_height)
                    .await?;
                Ok(NodeCommsResponse::FetchTemplateRegistrationsResponse(
                    template_registrations,
                ))
            },
            NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash } => {
                let utxos = self.blockchain_db.fetch_outputs_in_block(block_hash).await?;
                Ok(NodeCommsResponse::TransactionOutputs(utxos))
            },
            NodeCommsRequest::FetchMinedInfoByPayRef(payref) => {
                let output_info = self.blockchain_db.fetch_mined_info_by_payref(payref).await?;
                Ok(NodeCommsResponse::MinedInfo(output_info))
            },
            NodeCommsRequest::FetchMinedInfoByOutputHash(output_hash) => {
                let output_info = self.blockchain_db.fetch_mined_info_by_output_hash(output_hash).await?;
                Ok(NodeCommsResponse::MinedInfo(output_info))
            },
            NodeCommsRequest::FetchOutputMinedInfo(output_hash) => {
                let output_info = self.blockchain_db.fetch_output(output_hash).await?;
                Ok(NodeCommsResponse::OutputMinedInfo(output_info))
            },
            NodeCommsRequest::CheckOutputSpentStatus(output_hash) => {
                let input_info = self.blockchain_db.fetch_input(output_hash).await?;
                Ok(NodeCommsResponse::InputMinedInfo(input_info))
            },
            NodeCommsRequest::FetchValidatorNodeChanges { epoch, sidechain_id } => {
                let added_validators = self
                    .blockchain_db
                    .fetch_validators_activating_in_epoch(sidechain_id.clone(), epoch)
                    .await?;

                let exit_validators = self
                    .blockchain_db
                    .fetch_validators_exiting_in_epoch(sidechain_id.clone(), epoch)
                    .await?;

                info!(
                    target: LOG_TARGET,
                    "Fetched {} validators activating and {} validators exiting in epoch {}",
                    added_validators.len(),
                    exit_validators.len(),
                    epoch,
                );

                let mut node_changes = Vec::with_capacity(added_validators.len() + exit_validators.len());

                node_changes.extend(added_validators.into_iter().map(|vn| ValidatorNodeChange::Add {
                    registration: vn.original_registration.into(),
                    activation_epoch: vn.activation_epoch,
                    minimum_value_promise: vn.minimum_value_promise,
                    shard_key: vn.shard_key,
                }));

                node_changes.extend(exit_validators.into_iter().map(|vn| ValidatorNodeChange::Remove {
                    public_key: vn.public_key,
                }));

                Ok(NodeCommsResponse::FetchValidatorNodeChangesResponse(node_changes))
            },
        }
    }
    /// Handles a `NewBlock` message. Only a single `NewBlock` message can be handled at once to prevent extraneous
    /// requests for the full block.
    /// This may (asynchronously) block until the other request(s) complete or time out and so should typically be
    /// executed in a dedicated task.
    pub async fn handle_new_block_message(
        &mut self,
        new_block: NewBlock,
        source_peer: NodeId,
    ) -> Result<(), CommsInterfaceError> {
        let block_hash = new_block.header.hash();

        if self.blockchain_db.inner().is_add_block_disabled() {
            info!(
                target: LOG_TARGET,
                "Ignoring block message ({}) because add_block is locked",
                block_hash.to_hex()
            );
            return Ok(());
        }

        // Let's check if the block exists before we try to ask for a complete block
        if self.check_exists_and_not_bad_block(block_hash).await? {
            return Ok(());
        }

        // Let's check that the difficulty at least matches 50% of the tip header. The max difficulty drop is 16%, thus
        // 50% is way more than that and in order to attack the node, you need 50% of the mining power. We cannot check
        // the target difficulty as orphan blocks don't have a target difficulty. All we care about here is that bad
        // blocks are not free to make, and that they are more expensive to make than they are to validate. As
        // soon as a block can be linked to the main chain, a proper full proof of work check will
        // be done before any other validation.
        self.check_min_block_difficulty(&new_block).await?;

        {
            // we use a double lock to make sure we can only reconcile one unique block at a time. We may receive the
            // same block from multiple peers near simultaneously. We should only reconcile each unique block once.
            let read_lock = self.list_of_reconciling_blocks.read().await;
            if read_lock.contains(&block_hash) {
                debug!(
                    target: LOG_TARGET,
                    "Block with hash `{}` is already being reconciled",
                    block_hash.to_hex()
                );
                return Ok(());
            }
        }
        {
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
            if self.check_exists_and_not_bad_block(block_hash).await? {
                return Ok(());
            }

            if !write_lock.insert(block_hash) {
                debug!(
                    target: LOG_TARGET,
                    "Block with hash `{}` is already being reconciled",
                    block_hash.to_hex()
                );
                return Ok(());
            }
        }

        debug!(
            target: LOG_TARGET,
            "Block with hash `{}` is unknown. Constructing block from known mempool transactions / requesting missing \
             transactions from peer '{}'.",
            block_hash.to_hex(),
            source_peer
        );

        let result = self.reconcile_and_add_block(source_peer.clone(), new_block).await;

        {
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
            write_lock.remove(&block_hash);
        }
        result?;
        Ok(())
    }
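    /// Cheap sanity check on a propagated `NewBlock`: the achieved difficulty must be at least half of the target
    /// difficulty of the last chain header that used the same PoW algorithm (or the consensus minimum), so that bad
    /// blocks are more expensive to make than they are to validate.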
    async fn check_min_block_difficulty(&self, new_block: &NewBlock) -> Result<(), CommsInterfaceError> {
        let constants = self.consensus_manager.consensus_constants(new_block.header.height);
        let gen_hash = *self.consensus_manager.get_genesis_block().hash();
        let mut min_difficulty = constants.min_pow_difficulty(new_block.header.pow.pow_algo);
        let mut header = self.blockchain_db.fetch_last_chain_header().await?;
        loop {
            if new_block.header.pow_algo() == header.header().pow_algo() {
                min_difficulty = max(
                    header
                        .accumulated_data()
                        .target_difficulty
                        .checked_div_u64(2)
                        .unwrap_or(min_difficulty),
                    min_difficulty,
                );
                break;
            }
            if header.height() == 0 {
                break;
            }
            // we have not reached the genesis block, and the pow algo does not match, so let's go further back
            header = self
                .blockchain_db
                .fetch_chain_header(header.height().saturating_sub(1))
                .await?;
        }
        let achieved = match new_block.header.pow_algo() {
            PowAlgorithm::RandomXM => monero_randomx_difficulty(
                &new_block.header,
                &self.randomx_factory,
                &gen_hash,
                &self.consensus_manager,
            )?,
            PowAlgorithm::Sha3x => sha3x_difficulty(&new_block.header)?,
            PowAlgorithm::RandomXT => {
                let vm_key = *self
                    .blockchain_db
                    .fetch_chain_header(tari_rx_vm_key_height(header.height()))
                    .await?
                    .hash();
                tari_randomx_difficulty(&new_block.header, &self.randomx_factory, &vm_key)?
            },
            PowAlgorithm::Cuckaroo => {
                let constants = self.consensus_manager.consensus_constants(new_block.header.height);
                let cuckaroo_cycle = constants.cuckaroo_cycle_length();
                let edge_bits = constants.cuckaroo_edge_bits();
                cuckaroo_difficulty(&new_block.header, cuckaroo_cycle, edge_bits)?
            },
        };
        if achieved < min_difficulty {
            return Err(CommsInterfaceError::InvalidBlockHeader(
                BlockHeaderValidationError::ProofOfWorkError(PowError::AchievedDifficultyBelowMin),
            ));
        }
        Ok(())
    }
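    /// Returns `Ok(true)` if the block is already stored as a chain block or orphan, `Ok(false)` if it is unknown,
    /// and an error if the hash was previously recorded as a bad block.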
    async fn check_exists_and_not_bad_block(&self, block: FixedHash) -> Result<bool, CommsInterfaceError> {
        if self.blockchain_db.chain_header_or_orphan_exists(block).await? {
            debug!(
                target: LOG_TARGET,
                "Block with hash `{}` already stored",
                block.to_hex()
            );
            return Ok(true);
        }
        let (is_bad_block, reason) = self.blockchain_db.bad_block_exists(block).await?;
        if is_bad_block {
            debug!(
                target: LOG_TARGET,
                "Block with hash `{}` already validated as a bad block due to `{}`",
                block.to_hex(), reason
            );
            return Err(CommsInterfaceError::ChainStorageError(
                ChainStorageError::ValidationError {
                    source: ValidationError::BadBlockFound {
                        hash: block.to_hex(),
                        reason,
                    },
                },
            ));
        }
        Ok(false)
    }

    async fn reconcile_and_add_block(
        &mut self,
        source_peer: NodeId,
        new_block: NewBlock,
    ) -> Result<(), CommsInterfaceError> {
        let block = self.reconcile_block(source_peer.clone(), new_block).await?;
        self.handle_block(block, Some(source_peer)).await?;
        Ok(())
    }
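    /// Attempt to reconstruct the full block for a compact `NewBlock` message: coinbase-only blocks are built
    /// directly, otherwise known transactions are pulled from the mempool and any missing ones are requested from
    /// the source peer, falling back to requesting the full block when reconstruction is not possible.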
    #[allow(clippy::too_many_lines)]
    async fn reconcile_block(
        &mut self,
        source_peer: NodeId,
        new_block: NewBlock,
    ) -> Result<Block, CommsInterfaceError> {
        let NewBlock {
            header,
            coinbase_kernels,
            coinbase_outputs,
            kernel_excess_sigs: excess_sigs,
        } = new_block;
        // If the block is empty, we don't have to ask for the block, as we already have the full block available
        // to us.
        if excess_sigs.is_empty() {
            let block = BlockBuilder::new(header.version)
                .add_outputs(coinbase_outputs)
                .add_kernels(coinbase_kernels)
                .with_header(header)
                .build();
            return Ok(block);
        }

        let block_hash = header.hash();
        // We check the current tip and orphan status of the block because we cannot guarantee that mempool state is
        // correct and the mmr root calculation is only valid if the block is building on the tip.
        let current_meta = self.blockchain_db.get_chain_metadata().await?;
        if header.prev_hash != *current_meta.best_block_hash() {
            debug!(
                target: LOG_TARGET,
                "Orphaned block #{}: ({}), current tip is: #{} ({}). We need to fetch the complete block from peer: \
                 ({})",
                header.height,
                block_hash.to_hex(),
                current_meta.best_block_height(),
                current_meta.best_block_hash().to_hex(),
                source_peer,
            );
            #[allow(clippy::cast_possible_wrap)]
            #[cfg(feature = "metrics")]
            metrics::compact_block_tx_misses(header.height).set(excess_sigs.len() as i64);
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        // We know that the block is neither an orphan nor a coinbase, so let's ask our mempool for the transactions
        let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
        let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect();

        #[allow(clippy::cast_possible_wrap)]
        #[cfg(feature = "metrics")]
        metrics::compact_block_tx_misses(header.height).set(missing_excess_sigs.len() as i64);

        let mut builder = BlockBuilder::new(header.version)
            .add_outputs(coinbase_outputs)
            .add_kernels(coinbase_kernels)
            .with_transactions(known_transactions);

        if missing_excess_sigs.is_empty() {
            debug!(
                target: LOG_TARGET,
                "All transactions for block #{} ({}) found in mempool",
                header.height,
                block_hash.to_hex()
            );
        } else {
            debug!(
                target: LOG_TARGET,
                "Requesting {} unknown transaction(s) from peer '{}'.",
                missing_excess_sigs.len(),
                source_peer
            );

            let FetchMempoolTransactionsResponse {
                transactions,
                not_found,
            } = self
                .outbound_nci
                .request_transactions_by_excess_sig(source_peer.clone(), missing_excess_sigs)
                .await?;

            // Add returned transactions to unconfirmed pool
            if !transactions.is_empty() {
                self.mempool.insert_all(transactions.clone()).await?;
            }

            if !not_found.is_empty() {
                warn!(
                    target: LOG_TARGET,
                    "Peer {} was not able to return all transactions for block #{} ({}). {} transaction(s) not found. \
                     Requesting full block.",
                    source_peer,
                    header.height,
                    block_hash.to_hex(),
                    not_found.len()
                );

                #[cfg(feature = "metrics")]
                metrics::compact_block_full_misses(header.height).inc();
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            }

            builder = builder.with_transactions(
                transactions
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect(),
            );
        }

        // NB: Add the header last because `with_transactions` etc. updates the current header, but we have the final one
        // already
        builder = builder.with_header(header.clone());
        let block = builder.build();

        // Perform a sanity check on the reconstructed block, if the MMR roots don't match then it's possible one or
        // more transactions in our mempool had the same excess/signature for a *different* transaction.
        // This is extremely unlikely, but still possible. In case of a mismatch, request the full block from the peer.
        let (block, mmr_roots) = match self.blockchain_db.calculate_mmr_roots(block).await {
            Err(_) => {
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            },
            Ok(v) => v,
        };
        if let Err(e) = helpers::check_mmr_roots(&header, &mmr_roots) {
            warn!(
                target: LOG_TARGET,
                "Reconstructed block #{} ({}) failed MMR check validation! Requesting full block. Error: {}",
                header.height,
                block_hash.to_hex(),
                e,
            );

            #[cfg(feature = "metrics")]
            metrics::compact_block_mmr_mismatch(header.height).inc();
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        Ok(block)
    }

    async fn request_full_block_from_peer(
        &mut self,
        source_peer: NodeId,
        block_hash: BlockHash,
    ) -> Result<Block, CommsInterfaceError> {
        match self
            .outbound_nci
            .request_blocks_by_hashes_from_peer(block_hash, Some(source_peer.clone()))
            .await
        {
            Ok(Some(block)) => Ok(block),
            Ok(None) => {
                debug!(
                    target: LOG_TARGET,
                    "Peer `{source_peer}` failed to return the block that was requested."
                );
                Err(CommsInterfaceError::InvalidPeerResponse(format!(
                    "Invalid response from peer `{source_peer}`: Peer failed to provide the block that was propagated"
                )))
            },
            Err(CommsInterfaceError::UnexpectedApiResponse) => {
                debug!(
                    target: LOG_TARGET,
                    "Peer `{source_peer}` sent unexpected API response."
                );
                Err(CommsInterfaceError::UnexpectedApiResponse)
            },
            Err(e) => Err(e),
        }
    }

    /// Handle inbound blocks from remote nodes and local services.
    ///
    /// ## Arguments
    /// block - the block to store
    /// source_peer - the peer that sent this new block message, or None if the block was generated by a local miner
    pub async fn handle_block(
        &mut self,
        block: Block,
        source_peer: Option<NodeId>,
    ) -> Result<BlockHash, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;

        info!(
            target: LOG_TARGET,
            "Block #{} ({}) received from {}",
            block_height,
            block_hash.to_hex(),
            source_peer
                .as_ref()
                .map(|p| format!("remote peer: {p}"))
                .unwrap_or_else(|| "local services".to_string())
        );
        debug!(target: LOG_TARGET, "Incoming block: {block}");
        let timer = Instant::now();
        let block = self.hydrate_block(block).await?;

        let add_block_result = self.blockchain_db.add_block(block.clone()).await;
        // Create block event on block event stream
        match add_block_result {
            Ok(block_add_result) => {
                debug!(
                    target: LOG_TARGET,
                    "Block #{} ({}) added ({}) to blockchain in {:.2?}",
                    block_height,
                    block_hash.to_hex(),
                    block_add_result,
                    timer.elapsed()
                );

                let should_propagate = match &block_add_result {
                    BlockAddResult::Ok(_) => true,
                    BlockAddResult::BlockExists => false,
                    BlockAddResult::OrphanBlock => false,
                    BlockAddResult::ChainReorg { .. } => true,
                };

                #[cfg(feature = "metrics")]
                self.update_block_result_metrics(&block_add_result).await?;

                self.publish_block_event(BlockEvent::ValidBlockAdded(block.clone(), block_add_result));

                if should_propagate {
                    debug!(
                        target: LOG_TARGET,
                        "Propagate block ({}) to network.",
                        block_hash.to_hex()
                    );
                    let exclude_peers = source_peer.into_iter().collect();
                    let new_block_msg = NewBlock::from(&*block);
                    if let Err(e) = self.outbound_nci.propagate_block(new_block_msg, exclude_peers).await {
                        warn!(
                            target: LOG_TARGET,
                            "Failed to propagate block ({}) to network: {}.",
                            block_hash.to_hex(), e
                        );
                    }
                }
                Ok(block_hash)
            },

            Err(e @ ChainStorageError::ValidationError { .. }) => {
                #[cfg(feature = "metrics")]
                {
                    let block_hash = block.hash();
                    metrics::rejected_blocks(block.header.height, &block_hash).inc();
                }
                warn!(
                    target: LOG_TARGET,
                    "Peer {} sent an invalid block: {}",
                    source_peer
                        .as_ref()
                        .map(ToString::to_string)
                        .unwrap_or_else(|| "<local request>".to_string()),
                    e
                );
                self.publish_block_event(BlockEvent::AddBlockValidationFailed { block, source_peer });
                Err(e.into())
            },

            Err(e) => {
                #[cfg(feature = "metrics")]
                metrics::rejected_blocks(block.header.height, &block.hash()).inc();

                self.publish_block_event(BlockEvent::AddBlockErrored { block });
                Err(e.into())
            },
        }
    }

    async fn hydrate_block(&mut self, block: Block) -> Result<Arc<Block>, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;
        if block.body.inputs().is_empty() {
            debug!(
                target: LOG_TARGET,
                "Block #{} ({}) contains no inputs so nothing to hydrate",
                block_height,
                block_hash.to_hex(),
            );
            return Ok(Arc::new(block));
        }

        let timer = Instant::now();
        let (header, mut inputs, outputs, kernels) = block.dissolve();

        let db = self.blockchain_db.inner().db_read_access()?;
        for input in &mut inputs {
            if !input.is_compact() {
                continue;
            }

            let output_mined_info =
                db.fetch_output(&input.output_hash())?
                    .ok_or_else(|| CommsInterfaceError::InvalidFullBlock {
                        hash: block_hash,
                        details: format!("Output {} to be spent does not exist in db", input.output_hash()),
                    })?;

            input.add_output_data(output_mined_info.output);
        }
        debug!(
            target: LOG_TARGET,
            "Hydrated block #{} ({}) with {} input(s) in {:.2?}",
            block_height,
            block_hash.to_hex(),
            inputs.len(),
            timer.elapsed()
        );
        let block = Block::new(header, AggregateBody::new(inputs, outputs, kernels));
        Ok(Arc::new(block))
    }

    fn publish_block_event(&self, event: BlockEvent) {
        if let Err(event) = self.block_event_sender.send(Arc::new(event)) {
            debug!(target: LOG_TARGET, "No event subscribers. Event {} dropped.", event.0)
        }
    }

    #[cfg(feature = "metrics")]
    async fn update_block_result_metrics(&self, block_add_result: &BlockAddResult) -> Result<(), CommsInterfaceError> {
        fn update_target_difficulty(block: &ChainBlock) {
            match block.header().pow_algo() {
                PowAlgorithm::Sha3x => {
                    metrics::target_difficulty_sha()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomXM => {
                    metrics::target_difficulty_monero_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomXT => {
                    metrics::target_difficulty_tari_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::Cuckaroo => {
                    metrics::target_difficulty_cuckaroo()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
            }
        }

        match block_add_result {
            BlockAddResult::Ok(ref block) => {
                update_target_difficulty(block);
                #[allow(clippy::cast_possible_wrap)]
                metrics::tip_height().set(block.height() as i64);
                let utxo_set_size = self.blockchain_db.utxo_count().await?;
                metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
            },
            BlockAddResult::ChainReorg { added, removed } => {
                if let Some(fork_height) = added.last().map(|b| b.height()) {
                    #[allow(clippy::cast_possible_wrap)]
                    metrics::tip_height().set(fork_height as i64);
                    metrics::reorg(fork_height, added.len(), removed.len()).inc();

                    let utxo_set_size = self.blockchain_db.utxo_count().await?;
                    metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
                }
                for block in added {
                    update_target_difficulty(block);
                }
            },
            BlockAddResult::OrphanBlock => {
                metrics::orphaned_blocks().inc();
            },
            _ => {},
        }
        Ok(())
    }

    async fn get_target_difficulty_for_next_block(
        &self,
        pow_algo: PowAlgorithm,
        constants: &ConsensusConstants,
        current_block_hash: HashOutput,
    ) -> Result<Difficulty, CommsInterfaceError> {
        let target_difficulty = self
            .blockchain_db
            .fetch_target_difficulty_for_next_block(pow_algo, current_block_hash)
            .await?;

        let target = target_difficulty.calculate(
            constants.min_pow_difficulty(pow_algo),
            constants.max_pow_difficulty(pow_algo),
        );
        trace!(target: LOG_TARGET, "Target difficulty {target} for PoW {pow_algo}");
        Ok(target)
    }

    pub async fn get_last_seen_hash(&self) -> Result<FixedHash, CommsInterfaceError> {
        self.mempool.get_last_seen_hash().await.map_err(|e| e.into())
    }
}
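// Manual `Clone` impl: `#[derive(Clone)]` would add an unnecessary `B: Clone` bound, and every field here is a
// cheaply cloneable handle.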
impl<B> Clone for InboundNodeCommsHandlers<B> {
    fn clone(&self) -> Self {
        Self {
            block_event_sender: self.block_event_sender.clone(),
            blockchain_db: self.blockchain_db.clone(),
            mempool: self.mempool.clone(),
            consensus_manager: self.consensus_manager.clone(),
            list_of_reconciling_blocks: self.list_of_reconciling_blocks.clone(),
            outbound_nci: self.outbound_nci.clone(),
            connectivity: self.connectivity.clone(),
            randomx_factory: self.randomx_factory.clone(),
        }
    }
}
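
// The items below are illustrative, self-contained sketches (not used by the node) of two idioms that appear
// above. The first shows the double-checked locking pattern that `handle_new_block_message` applies to
// `list_of_reconciling_blocks`: a cheap read lock rejects duplicates early, and the insert is re-checked under
// the write lock so that each unique block hash is claimed by exactly one caller.
#[allow(dead_code)]
async fn try_claim_for_reconciliation(
    reconciling_blocks: &RwLock<HashSet<FixedHash>>,
    block_hash: FixedHash,
) -> bool {
    {
        // Fast path: concurrent propagations of an already-claimed block only take the read lock.
        let read_lock = reconciling_blocks.read().await;
        if read_lock.contains(&block_hash) {
            return false;
        }
    }
    // Slow path: `HashSet::insert` returns false if another caller claimed the hash in the meantime.
    let mut write_lock = reconciling_blocks.write().await;
    write_lock.insert(block_hash)
}

// The second is the `Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone())` idiom used when draining mempool
// transactions into a block template: take ownership without cloning when this is the last reference, and clone
// only as a fallback.
#[allow(dead_code)]
fn unwrap_or_clone<T: Clone>(value: Arc<T>) -> T {
    Arc::try_unwrap(value).unwrap_or_else(|shared| (*shared).clone())
}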