• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tari-project / tari / 12808840268

16 Jan 2025 12:26PM UTC coverage: 65.465% (+0.003%) from 65.462%
12808840268

push

github

web-flow
feat: new grpc method (#6742)

Description
---
Creates a new gRPC method to get the reward of the next block, the SHA3x and
RandomX estimated hash rates, and the metadata tip info.
Adds caching to grpc calls

Motivation and Context
---
Currently, universe calls get_template to just read the reward which is
an expensive operation.
It also streams up to 100 headers to calculate the estimated hash rate. 

Both of these operations can be made much simpler and faster. This PR
provides a call to do just that.
This PR also adds caching to the calls so that they only update when an update
is required.

0 of 39 new or added lines in 5 files covered. (0.0%)

8 existing lines in 2 files now uncovered.

73018 of 111538 relevant lines covered (65.46%)

273554.43 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

53.66
/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
1
// Copyright 2019. The Tari Project
2
//
3
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
4
// following conditions are met:
5
//
6
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
7
// disclaimer.
8
//
9
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
10
// following disclaimer in the documentation and/or other materials provided with the distribution.
11
//
12
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
13
// products derived from this software without specific prior written permission.
14
//
15
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
16
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
19
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
20
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
21
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22

23
#[cfg(feature = "metrics")]
24
use std::convert::{TryFrom, TryInto};
25
use std::{cmp::max, collections::HashSet, sync::Arc, time::Instant};
26

27
use log::*;
28
use strum_macros::Display;
29
use tari_common_types::types::{BlockHash, FixedHash, HashOutput};
30
use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId};
31
use tari_utilities::hex::Hex;
32
use tokio::sync::RwLock;
33

34
#[cfg(feature = "metrics")]
35
use crate::base_node::metrics;
36
use crate::{
37
    base_node::comms_interface::{
38
        error::CommsInterfaceError,
39
        local_interface::BlockEventSender,
40
        FetchMempoolTransactionsResponse,
41
        NodeCommsRequest,
42
        NodeCommsResponse,
43
        OutboundNodeCommsInterface,
44
    },
45
    blocks::{Block, BlockBuilder, BlockHeader, BlockHeaderValidationError, ChainBlock, NewBlock, NewBlockTemplate},
46
    chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError},
47
    consensus::{ConsensusConstants, ConsensusManager},
48
    mempool::Mempool,
49
    proof_of_work::{
50
        randomx_difficulty,
51
        randomx_factory::RandomXFactory,
52
        sha3x_difficulty,
53
        Difficulty,
54
        PowAlgorithm,
55
        PowError,
56
    },
57
    transactions::aggregated_body::AggregateBody,
58
    validation::{helpers, ValidationError},
59
};
60

61
const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler";
62
const MAX_REQUEST_BY_BLOCK_HASHES: usize = 100;
63
const MAX_REQUEST_BY_KERNEL_EXCESS_SIGS: usize = 100;
64
const MAX_REQUEST_BY_UTXO_HASHES: usize = 100;
65
const MAX_MEMPOOL_TIMEOUT: u64 = 150;
66

67
/// Events that can be published on the Validated Block Event Stream
68
/// Broadcast is to notify subscribers if this is a valid propagated block event
69
#[derive(Debug, Clone, Display)]
×
70
pub enum BlockEvent {
71
    ValidBlockAdded(Arc<Block>, BlockAddResult),
72
    AddBlockValidationFailed {
73
        block: Arc<Block>,
74
        source_peer: Option<NodeId>,
75
    },
76
    AddBlockErrored {
77
        block: Arc<Block>,
78
    },
79
    BlockSyncComplete(Arc<ChainBlock>, u64),
80
    BlockSyncRewind(Vec<Arc<ChainBlock>>),
81
}
82

83
/// The InboundNodeCommsInterface is used to handle all received inbound requests from remote nodes.
84
pub struct InboundNodeCommsHandlers<B> {
85
    block_event_sender: BlockEventSender,
86
    blockchain_db: AsyncBlockchainDb<B>,
87
    mempool: Mempool,
88
    consensus_manager: ConsensusManager,
89
    list_of_reconciling_blocks: Arc<RwLock<HashSet<HashOutput>>>,
90
    outbound_nci: OutboundNodeCommsInterface,
91
    connectivity: ConnectivityRequester,
92
    randomx_factory: RandomXFactory,
93
}
94

95
impl<B> InboundNodeCommsHandlers<B>
96
where B: BlockchainBackend + 'static
97
{
98
    /// Construct a new InboundNodeCommsInterface.
99
    pub fn new(
64✔
100
        block_event_sender: BlockEventSender,
64✔
101
        blockchain_db: AsyncBlockchainDb<B>,
64✔
102
        mempool: Mempool,
64✔
103
        consensus_manager: ConsensusManager,
64✔
104
        outbound_nci: OutboundNodeCommsInterface,
64✔
105
        connectivity: ConnectivityRequester,
64✔
106
        randomx_factory: RandomXFactory,
64✔
107
    ) -> Self {
64✔
108
        Self {
64✔
109
            block_event_sender,
64✔
110
            blockchain_db,
64✔
111
            mempool,
64✔
112
            consensus_manager,
64✔
113
            list_of_reconciling_blocks: Arc::new(RwLock::new(HashSet::new())),
64✔
114
            outbound_nci,
64✔
115
            connectivity,
64✔
116
            randomx_factory,
64✔
117
        }
64✔
118
    }
64✔
119

120
    /// Handle inbound node comms requests from remote nodes and local services.
121
    #[allow(clippy::too_many_lines)]
122
    pub async fn handle_request(&self, request: NodeCommsRequest) -> Result<NodeCommsResponse, CommsInterfaceError> {
178✔
123
        trace!(target: LOG_TARGET, "Handling remote request {}", request);
178✔
124
        match request {
178✔
125
            NodeCommsRequest::GetChainMetadata => Ok(NodeCommsResponse::ChainMetadata(
126
                self.blockchain_db.get_chain_metadata().await?,
158✔
127
            )),
NEW
128
            NodeCommsRequest::GetTargetDifficultyNextBlock(algo) => {
×
NEW
129
                let header = self.blockchain_db.fetch_tip_header().await?;
×
NEW
130
                let constants = self.consensus_manager.consensus_constants(header.header().height);
×
NEW
131
                let target_difficulty = self
×
NEW
132
                    .get_target_difficulty_for_next_block(algo, constants, *header.hash())
×
NEW
133
                    .await?;
×
NEW
134
                Ok(NodeCommsResponse::TargetDifficulty(target_difficulty))
×
135
            },
136
            NodeCommsRequest::FetchHeaders(range) => {
1✔
137
                let headers = self.blockchain_db.fetch_chain_headers(range).await?;
1✔
138
                Ok(NodeCommsResponse::BlockHeaders(headers))
1✔
139
            },
140
            NodeCommsRequest::FetchHeadersByHashes(block_hashes) => {
×
141
                if block_hashes.len() > MAX_REQUEST_BY_BLOCK_HASHES {
×
142
                    return Err(CommsInterfaceError::InvalidRequest {
×
143
                        request: "FetchHeadersByHashes",
×
144
                        details: format!(
×
145
                            "Exceeded maximum block hashes request (max: {}, got:{})",
×
146
                            MAX_REQUEST_BY_BLOCK_HASHES,
×
147
                            block_hashes.len()
×
148
                        ),
×
149
                    });
×
150
                }
×
151
                let mut block_headers = Vec::with_capacity(block_hashes.len());
×
152
                for block_hash in block_hashes {
×
153
                    let block_hex = block_hash.to_hex();
×
154
                    match self.blockchain_db.fetch_chain_header_by_block_hash(block_hash).await? {
×
155
                        Some(block_header) => {
×
156
                            block_headers.push(block_header);
×
157
                        },
×
158
                        None => {
159
                            error!(target: LOG_TARGET, "Could not fetch headers with hashes:{}", block_hex);
×
160
                            return Err(CommsInterfaceError::InternalError(format!(
×
161
                                "Could not fetch headers with hashes:{}",
×
162
                                block_hex
×
163
                            )));
×
164
                        },
165
                    }
166
                }
167
                Ok(NodeCommsResponse::BlockHeaders(block_headers))
×
168
            },
169
            NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => {
1✔
170
                let mut res = Vec::with_capacity(utxo_hashes.len());
1✔
171
                for (output, spent) in (self
1✔
172
                    .blockchain_db
1✔
173
                    .fetch_outputs_with_spend_status_at_tip(utxo_hashes)
1✔
174
                    .await?)
1✔
175
                    .into_iter()
1✔
176
                    .flatten()
1✔
177
                {
178
                    if !spent {
1✔
179
                        res.push(output);
1✔
180
                    }
1✔
181
                }
182
                Ok(NodeCommsResponse::TransactionOutputs(res))
1✔
183
            },
184
            NodeCommsRequest::FetchMatchingBlocks { range, compact } => {
3✔
185
                let blocks = self.blockchain_db.fetch_blocks(range, compact).await?;
3✔
186
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
3✔
187
            },
188
            NodeCommsRequest::FetchBlocksByKernelExcessSigs(excess_sigs) => {
×
189
                if excess_sigs.len() > MAX_REQUEST_BY_KERNEL_EXCESS_SIGS {
×
190
                    return Err(CommsInterfaceError::InvalidRequest {
×
191
                        request: "FetchBlocksByKernelExcessSigs",
×
192
                        details: format!(
×
193
                            "Exceeded maximum number of kernel excess sigs in request (max: {}, got:{})",
×
194
                            MAX_REQUEST_BY_KERNEL_EXCESS_SIGS,
×
195
                            excess_sigs.len()
×
196
                        ),
×
197
                    });
×
198
                }
×
199
                let mut blocks = Vec::with_capacity(excess_sigs.len());
×
200
                for sig in excess_sigs {
×
201
                    let sig_hex = sig.get_signature().to_hex();
×
202
                    debug!(
×
203
                        target: LOG_TARGET,
×
204
                        "A peer has requested a block with kernel with sig {}", sig_hex
×
205
                    );
206
                    match self.blockchain_db.fetch_block_with_kernel(sig).await {
×
207
                        Ok(Some(block)) => blocks.push(block),
×
208
                        Ok(None) => warn!(
×
209
                            target: LOG_TARGET,
×
210
                            "Could not provide requested block containing kernel with sig {} to peer because not \
×
211
                             stored",
×
212
                            sig_hex
213
                        ),
214
                        Err(e) => warn!(
×
215
                            target: LOG_TARGET,
×
216
                            "Could not provide requested block containing kernel with sig {} to peer because: {}",
×
217
                            sig_hex,
×
218
                            e.to_string()
×
219
                        ),
220
                    }
221
                }
222
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
×
223
            },
224
            NodeCommsRequest::FetchBlocksByUtxos(commitments) => {
×
225
                if commitments.len() > MAX_REQUEST_BY_UTXO_HASHES {
×
226
                    return Err(CommsInterfaceError::InvalidRequest {
×
227
                        request: "FetchBlocksByUtxos",
×
228
                        details: format!(
×
229
                            "Exceeded maximum number of utxo hashes in request (max: {}, got:{})",
×
230
                            MAX_REQUEST_BY_UTXO_HASHES,
×
231
                            commitments.len()
×
232
                        ),
×
233
                    });
×
234
                }
×
235
                let mut blocks = Vec::with_capacity(commitments.len());
×
236
                for commitment in commitments {
×
237
                    let commitment_hex = commitment.to_hex();
×
238
                    debug!(
×
239
                        target: LOG_TARGET,
×
240
                        "A peer has requested a block with commitment {}", commitment_hex,
×
241
                    );
242
                    match self.blockchain_db.fetch_block_with_utxo(commitment).await {
×
243
                        Ok(Some(block)) => blocks.push(block),
×
244
                        Ok(None) => warn!(
×
245
                            target: LOG_TARGET,
×
246
                            "Could not provide requested block with commitment {} to peer because not stored",
×
247
                            commitment_hex,
248
                        ),
249
                        Err(e) => warn!(
×
250
                            target: LOG_TARGET,
×
251
                            "Could not provide requested block with commitment {} to peer because: {}",
×
252
                            commitment_hex,
×
253
                            e.to_string()
×
254
                        ),
255
                    }
256
                }
257
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
×
258
            },
259
            NodeCommsRequest::GetHeaderByHash(hash) => {
×
260
                let header = self.blockchain_db.fetch_chain_header_by_block_hash(hash).await?;
×
261
                Ok(NodeCommsResponse::BlockHeader(header))
×
262
            },
263
            NodeCommsRequest::GetBlockByHash(hash) => {
×
264
                let block = self.blockchain_db.fetch_block_by_hash(hash, false).await?;
×
265
                Ok(NodeCommsResponse::HistoricalBlock(Box::new(block)))
×
266
            },
267
            NodeCommsRequest::GetNewBlockTemplate(request) => {
3✔
268
                let best_block_header = self.blockchain_db.fetch_tip_header().await?;
3✔
269
                let mut last_seen_hash = self.mempool.get_last_seen_hash().await?;
3✔
270
                let mut is_mempool_synced = false;
3✔
271
                let start = Instant::now();
3✔
272
                // this will wait a max of 150ms by default before returning anyway with a potential broken template
273
                // We need to ensure the mempool has seen the latest base node height before we can be confident the
274
                // template is correct
275
                while !is_mempool_synced && start.elapsed().as_millis() < MAX_MEMPOOL_TIMEOUT.into() {
45✔
276
                    if best_block_header.hash() == &last_seen_hash {
42✔
277
                        is_mempool_synced = true;
×
278
                    } else {
×
279
                        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
42✔
280
                        last_seen_hash = self.mempool.get_last_seen_hash().await?;
42✔
281
                    }
282
                }
283

284
                if !is_mempool_synced {
3✔
285
                    warn!(
3✔
286
                        target: LOG_TARGET,
×
287
                        "Mempool out of sync - last seen hash '{}' does not match the tip hash '{}'. This condition \
×
288
                         should auto correct with the next block template request",
×
289
                        last_seen_hash, best_block_header.hash()
×
290
                    );
291
                }
×
292
                let mut header = BlockHeader::from_previous(best_block_header.header());
3✔
293
                let constants = self.consensus_manager.consensus_constants(header.height);
3✔
294
                header.version = constants.blockchain_version();
3✔
295
                header.pow.pow_algo = request.algo;
3✔
296

297
                let constants_weight = constants
3✔
298
                    .max_block_weight_excluding_coinbases(1)
3✔
299
                    .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?;
3✔
300
                let asking_weight = if request.max_weight > constants_weight || request.max_weight == 0 {
3✔
301
                    constants_weight
3✔
302
                } else {
303
                    request.max_weight
×
304
                };
305

306
                debug!(
3✔
307
                    target: LOG_TARGET,
×
308
                    "Fetching transactions with a maximum weight of {} for the template", asking_weight
×
309
                );
310
                let transactions = self
3✔
311
                    .mempool
3✔
312
                    .retrieve(asking_weight)
3✔
313
                    .await?
3✔
314
                    .into_iter()
3✔
315
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
8✔
316
                    .collect::<Vec<_>>();
3✔
317

3✔
318
                debug!(
3✔
319
                    target: LOG_TARGET,
×
320
                    "Adding {} transaction(s) to new block template",
×
321
                    transactions.len(),
×
322
                );
323

324
                let prev_hash = header.prev_hash;
3✔
325
                let height = header.height;
3✔
326

3✔
327
                let block = header.into_builder().with_transactions(transactions).build();
3✔
328
                let block_hash = block.hash();
3✔
329
                let block_template = NewBlockTemplate::from_block(
3✔
330
                    block,
3✔
331
                    self.get_target_difficulty_for_next_block(request.algo, constants, prev_hash)
3✔
332
                        .await?,
3✔
333
                    self.consensus_manager.get_block_reward_at(height),
3✔
334
                    is_mempool_synced,
3✔
335
                )?;
×
336

337
                debug!(target: LOG_TARGET,
3✔
338
                    "New block template requested and prepared at height: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
×
339
                    block_template.header.height,
×
340
                    block_template.target_difficulty,
×
341
                    block_hash.to_hex(),
×
342
                    block_template
×
343
                        .body
×
344
                        .calculate_weight(constants.transaction_weight_params())
×
345
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
×
346
                    block_template.body.to_counts_string()
×
347
                );
348

349
                Ok(NodeCommsResponse::NewBlockTemplate(block_template))
3✔
350
            },
351
            NodeCommsRequest::GetNewBlock(block_template) => {
3✔
352
                let height = block_template.header.height;
3✔
353
                let target_difficulty = block_template.target_difficulty;
3✔
354
                let block = self.blockchain_db.prepare_new_block(block_template).await?;
3✔
355
                let constants = self.consensus_manager.consensus_constants(block.header.height);
3✔
356
                debug!(target: LOG_TARGET,
3✔
357
                    "Prepared block: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
×
358
                    height,
×
359
                    target_difficulty,
×
360
                    block.hash().to_hex(),
×
361
                    block
×
362
                        .body
×
363
                        .calculate_weight(constants.transaction_weight_params())
×
364
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
×
365
                    block.body.to_counts_string()
×
366
                );
367
                Ok(NodeCommsResponse::NewBlock {
3✔
368
                    success: true,
3✔
369
                    error: None,
3✔
370
                    block: Some(block),
3✔
371
                })
3✔
372
            },
373
            NodeCommsRequest::GetBlockFromAllChains(hash) => {
4✔
374
                let block_hex = hash.to_hex();
4✔
375
                debug!(
4✔
376
                    target: LOG_TARGET,
×
377
                    "A peer has requested a block with hash {}", block_hex
×
378
                );
379

380
                #[allow(clippy::blocks_in_conditions)]
381
                let maybe_block = match self
4✔
382
                    .blockchain_db
4✔
383
                    .fetch_block_by_hash(hash, true)
4✔
384
                    .await
4✔
385
                    .unwrap_or_else(|e| {
4✔
386
                        warn!(
×
387
                            target: LOG_TARGET,
×
388
                            "Could not provide requested block {} to peer because: {}",
×
389
                            block_hex,
×
390
                            e.to_string()
×
391
                        );
392

393
                        None
×
394
                    }) {
4✔
395
                    None => self.blockchain_db.fetch_orphan(hash).await.map_or_else(
1✔
396
                        |e| {
1✔
397
                            warn!(
1✔
398
                                target: LOG_TARGET,
×
399
                                "Could not provide requested block {} to peer because: {}", block_hex, e,
×
400
                            );
401

402
                            None
1✔
403
                        },
1✔
404
                        Some,
1✔
405
                    ),
1✔
406
                    Some(block) => Some(block.into_block()),
3✔
407
                };
408

409
                Ok(NodeCommsResponse::Block(Box::new(maybe_block)))
4✔
410
            },
411
            NodeCommsRequest::FetchKernelByExcessSig(signature) => {
1✔
412
                let kernels = match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await {
1✔
413
                    Ok(Some((kernel, _))) => vec![kernel],
1✔
414
                    Ok(None) => vec![],
×
415
                    Err(err) => {
×
416
                        error!(target: LOG_TARGET, "Could not fetch kernel {}", err.to_string());
×
417
                        return Err(err.into());
×
418
                    },
419
                };
420

421
                Ok(NodeCommsResponse::TransactionKernels(kernels))
1✔
422
            },
423
            NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs } => {
4✔
424
                let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
4✔
425
                Ok(NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
4✔
426
                    FetchMempoolTransactionsResponse {
4✔
427
                        transactions,
4✔
428
                        not_found,
4✔
429
                    },
4✔
430
                ))
4✔
431
            },
432
            NodeCommsRequest::FetchValidatorNodesKeys { height } => {
×
433
                let active_validator_nodes = self.blockchain_db.fetch_active_validator_nodes(height).await?;
×
434
                Ok(NodeCommsResponse::FetchValidatorNodesKeysResponse(
×
435
                    active_validator_nodes,
×
436
                ))
×
437
            },
438
            NodeCommsRequest::GetShardKey { height, public_key } => {
×
439
                let shard_key = self.blockchain_db.get_shard_key(height, public_key).await?;
×
440
                Ok(NodeCommsResponse::GetShardKeyResponse(shard_key))
×
441
            },
442
            NodeCommsRequest::FetchTemplateRegistrations {
443
                start_height,
×
444
                end_height,
×
445
            } => {
446
                let template_registrations = self
×
447
                    .blockchain_db
×
448
                    .fetch_template_registrations(start_height..=end_height)
×
449
                    .await?;
×
450
                Ok(NodeCommsResponse::FetchTemplateRegistrationsResponse(
×
451
                    template_registrations,
×
452
                ))
×
453
            },
454
            NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash } => {
×
455
                let utxos = self.blockchain_db.fetch_outputs_in_block(block_hash).await?;
×
456
                Ok(NodeCommsResponse::TransactionOutputs(utxos))
×
457
            },
458
        }
459
    }
178✔
460

461
    /// Handles a `NewBlock` message. Only a single `NewBlock` message can be handled at once to prevent extraneous
462
    /// requests for the full block.
463
    /// This may (asynchronously) block until the other request(s) complete or time out and so should typically be
464
    /// executed in a dedicated task.
465
    pub async fn handle_new_block_message(
21✔
466
        &mut self,
21✔
467
        new_block: NewBlock,
21✔
468
        source_peer: NodeId,
21✔
469
    ) -> Result<(), CommsInterfaceError> {
21✔
470
        let block_hash = new_block.header.hash();
21✔
471

21✔
472
        if self.blockchain_db.inner().is_add_block_disabled() {
21✔
473
            info!(
×
474
                target: LOG_TARGET,
×
475
                "Ignoring block message ({}) because add_block is locked",
×
476
                block_hash.to_hex()
×
477
            );
478
            return Ok(());
×
479
        }
21✔
480

21✔
481
        // Lets check if the block exists before we try and ask for a complete block
21✔
482
        if self.check_exists_and_not_bad_block(block_hash).await? {
41✔
483
            return Ok(());
×
484
        }
21✔
485

21✔
486
        // lets check that the difficulty at least matches 50% of the tip header. The max difficulty drop is 16%, thus
21✔
487
        // 50% is way more than that and in order to attack the node, you need 50% of the mining power. We cannot check
21✔
488
        // the target difficulty as orphan blocks dont have a target difficulty. All we care here is that bad
21✔
489
        // blocks are not free to make, and that they are more expensive to make then they are to validate. As
21✔
490
        // soon as a block can be linked to the main chain, a proper full proof of work check will
21✔
491
        // be done before any other validation.
21✔
492
        self.check_min_block_difficulty(&new_block).await?;
21✔
493

494
        {
495
            // we use a double lock to make sure we can only reconcile one unique block at a time. We may receive the
496
            // same block from multiple peer near simultaneously. We should only reconcile each unique block once.
497
            let read_lock = self.list_of_reconciling_blocks.read().await;
21✔
498
            if read_lock.contains(&block_hash) {
21✔
499
                debug!(
×
500
                    target: LOG_TARGET,
×
501
                    "Block with hash `{}` is already being reconciled",
×
502
                    block_hash.to_hex()
×
503
                );
504
                return Ok(());
×
505
            }
21✔
506
        }
507
        {
508
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
21✔
509
            if self.check_exists_and_not_bad_block(block_hash).await? {
41✔
510
                return Ok(());
×
511
            }
21✔
512

21✔
513
            if !write_lock.insert(block_hash) {
21✔
514
                debug!(
×
515
                    target: LOG_TARGET,
×
516
                    "Block with hash `{}` is already being reconciled",
×
517
                    block_hash.to_hex()
×
518
                );
519
                return Ok(());
×
520
            }
21✔
521
        }
21✔
522

21✔
523
        debug!(
21✔
524
            target: LOG_TARGET,
×
525
            "Block with hash `{}` is unknown. Constructing block from known mempool transactions / requesting missing \
×
526
             transactions from peer '{}'.",
×
527
            block_hash.to_hex(),
×
528
            source_peer
529
        );
530

531
        let result = self.reconcile_and_add_block(source_peer.clone(), new_block).await;
54✔
532

533
        {
534
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
21✔
535
            write_lock.remove(&block_hash);
21✔
536
        }
21✔
537
        result?;
21✔
538
        Ok(())
18✔
539
    }
21✔
540

541
    async fn check_min_block_difficulty(&self, new_block: &NewBlock) -> Result<(), CommsInterfaceError> {
21✔
542
        let constants = self.consensus_manager.consensus_constants(new_block.header.height);
21✔
543
        let gen_hash = *self.consensus_manager.get_genesis_block().hash();
21✔
544
        let mut min_difficulty = constants.min_pow_difficulty(new_block.header.pow.pow_algo);
21✔
545
        let mut header = self.blockchain_db.fetch_last_chain_header().await?;
21✔
546
        loop {
547
            if new_block.header.pow_algo() == header.header().pow_algo() {
21✔
548
                min_difficulty = max(
21✔
549
                    header
21✔
550
                        .accumulated_data()
21✔
551
                        .target_difficulty
21✔
552
                        .checked_div_u64(2)
21✔
553
                        .unwrap_or(min_difficulty),
21✔
554
                    min_difficulty,
21✔
555
                );
21✔
556
                break;
21✔
557
            }
×
558
            if header.height() == 0 {
×
559
                break;
×
560
            }
×
561
            // we have not reached gen block, and the pow algo does not match, so lets go further back
562
            header = self
×
563
                .blockchain_db
×
564
                .fetch_chain_header(header.height().saturating_sub(1))
×
565
                .await?;
×
566
        }
567
        let achieved = match new_block.header.pow_algo() {
21✔
568
            PowAlgorithm::RandomX => randomx_difficulty(
×
569
                &new_block.header,
×
570
                &self.randomx_factory,
×
571
                &gen_hash,
×
572
                &self.consensus_manager,
×
573
            )?,
×
574
            PowAlgorithm::Sha3x => sha3x_difficulty(&new_block.header)?,
21✔
575
        };
576
        if achieved < min_difficulty {
21✔
577
            return Err(CommsInterfaceError::InvalidBlockHeader(
×
578
                BlockHeaderValidationError::ProofOfWorkError(PowError::AchievedDifficultyBelowMin),
×
579
            ));
×
580
        }
21✔
581
        Ok(())
21✔
582
    }
21✔
583

584
    async fn check_exists_and_not_bad_block(&self, block: FixedHash) -> Result<bool, CommsInterfaceError> {
42✔
585
        if self.blockchain_db.chain_header_or_orphan_exists(block).await? {
42✔
586
            debug!(
×
587
                target: LOG_TARGET,
×
588
                "Block with hash `{}` already stored",
×
589
                block.to_hex()
×
590
            );
591
            return Ok(true);
×
592
        }
42✔
593
        let block_exist = self.blockchain_db.bad_block_exists(block).await?;
42✔
594
        if block_exist.0 {
42✔
595
            debug!(
×
596
                target: LOG_TARGET,
×
597
                "Block with hash `{}` already validated as a bad block due to {}",
×
598
                block.to_hex(), block_exist.1
×
599
            );
600
            return Err(CommsInterfaceError::ChainStorageError(
×
601
                ChainStorageError::ValidationError {
×
602
                    source: ValidationError::BadBlockFound {
×
603
                        hash: block.to_hex(),
×
604
                        reason: block_exist.1,
×
605
                    },
×
606
                },
×
607
            ));
×
608
        }
42✔
609
        Ok(false)
42✔
610
    }
42✔
611

612
    async fn reconcile_and_add_block(
21✔
613
        &mut self,
21✔
614
        source_peer: NodeId,
21✔
615
        new_block: NewBlock,
21✔
616
    ) -> Result<(), CommsInterfaceError> {
21✔
617
        let block = self.reconcile_block(source_peer.clone(), new_block).await?;
21✔
618
        self.handle_block(block, Some(source_peer)).await?;
38✔
619
        Ok(())
18✔
620
    }
21✔
621

622
    /// Attempts to reconstruct the full block described by a compact `NewBlock` message.
    ///
    /// Transactions are sourced from the local mempool where possible; any missing
    /// ones are requested from `source_peer` by excess signature. Falls back to
    /// requesting the complete block from the peer when: the block does not build on
    /// our current tip (orphan), the peer cannot supply all missing transactions, or
    /// the reconstructed block fails the MMR root sanity check.
    #[allow(clippy::too_many_lines)]
    async fn reconcile_block(
        &mut self,
        source_peer: NodeId,
        new_block: NewBlock,
    ) -> Result<Block, CommsInterfaceError> {
        let NewBlock {
            header,
            coinbase_kernels,
            coinbase_outputs,
            kernel_excess_sigs: excess_sigs,
        } = new_block;
        // If the block is empty, we dont have to ask for the block, as we already have the full block available
        // to us.
        if excess_sigs.is_empty() {
            let block = BlockBuilder::new(header.version)
                .add_outputs(coinbase_outputs)
                .add_kernels(coinbase_kernels)
                .with_header(header)
                .build();
            return Ok(block);
        }

        let block_hash = header.hash();
        // We check the current tip and orphan status of the block because we cannot guarantee that mempool state is
        // correct and the mmr root calculation is only valid if the block is building on the tip.
        let current_meta = self.blockchain_db.get_chain_metadata().await?;
        if header.prev_hash != *current_meta.best_block_hash() {
            debug!(
                target: LOG_TARGET,
                "Orphaned block #{}: ({}), current tip is: #{} ({}). We need to fetch the complete block from peer: \
                 ({})",
                header.height,
                block_hash.to_hex(),
                current_meta.best_block_height(),
                current_meta.best_block_hash().to_hex(),
                source_peer,
            );
            #[allow(clippy::cast_possible_wrap)]
            #[cfg(feature = "metrics")]
            metrics::compact_block_tx_misses(header.height).set(excess_sigs.len() as i64);
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        // We know that the block is neither and orphan or a coinbase, so lets ask our mempool for the transactions
        let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
        let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect();

        #[allow(clippy::cast_possible_wrap)]
        #[cfg(feature = "metrics")]
        metrics::compact_block_tx_misses(header.height).set(missing_excess_sigs.len() as i64);

        let mut builder = BlockBuilder::new(header.version)
            .add_outputs(coinbase_outputs)
            .add_kernels(coinbase_kernels)
            .with_transactions(known_transactions);

        if missing_excess_sigs.is_empty() {
            debug!(
                target: LOG_TARGET,
                "All transactions for block #{} ({}) found in mempool",
                header.height,
                block_hash.to_hex()
            );
        } else {
            debug!(
                target: LOG_TARGET,
                "Requesting {} unknown transaction(s) from peer '{}'.",
                missing_excess_sigs.len(),
                source_peer
            );

            let FetchMempoolTransactionsResponse {
                transactions,
                not_found,
            } = self
                .outbound_nci
                .request_transactions_by_excess_sig(source_peer.clone(), missing_excess_sigs)
                .await?;

            // Add returned transactions to unconfirmed pool
            if !transactions.is_empty() {
                self.mempool.insert_all(transactions.clone()).await?;
            }

            // The peer could not produce every missing transaction — the only way to
            // complete the block is to ask for the whole thing.
            if !not_found.is_empty() {
                warn!(
                    target: LOG_TARGET,
                    "Peer {} was not able to return all transactions for block #{} ({}). {} transaction(s) not found. \
                     Requesting full block.",
                    source_peer,
                    header.height,
                    block_hash.to_hex(),
                    not_found.len()
                );

                #[cfg(feature = "metrics")]
                metrics::compact_block_full_misses(header.height).inc();
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            }

            builder = builder.with_transactions(
                transactions
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect(),
            );
        }

        // NB: Add the header last because `with_transactions` etc updates the current header, but we have the final one
        // already
        builder = builder.with_header(header.clone());
        let block = builder.build();

        // Perform a sanity check on the reconstructed block, if the MMR roots don't match then it's possible one or
        // more transactions in our mempool had the same excess/signature for a *different* transaction.
        // This is extremely unlikely, but still possible. In case of a mismatch, request the full block from the peer.
        let (block, mmr_roots) = match self.blockchain_db.calculate_mmr_roots(block).await {
            Err(_) => {
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            },
            Ok(v) => v,
        };
        if let Err(e) = helpers::check_mmr_roots(&header, &mmr_roots) {
            warn!(
                target: LOG_TARGET,
                "Reconstructed block #{} ({}) failed MMR check validation!. Requesting full block. Error: {}",
                header.height,
                block_hash.to_hex(),
                e,
            );

            #[cfg(feature = "metrics")]
            metrics::compact_block_mmr_mismatch(header.height).inc();
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        Ok(block)
    }

    async fn request_full_block_from_peer(
4✔
767
        &mut self,
4✔
768
        source_peer: NodeId,
4✔
769
        block_hash: BlockHash,
4✔
770
    ) -> Result<Block, CommsInterfaceError> {
4✔
771
        match self
4✔
772
            .outbound_nci
4✔
773
            .request_blocks_by_hashes_from_peer(block_hash, Some(source_peer.clone()))
4✔
774
            .await
4✔
775
        {
776
            Ok(Some(block)) => Ok(block),
3✔
777
            Ok(None) => {
778
                debug!(
1✔
779
                    target: LOG_TARGET,
×
780
                    "Peer `{}` failed to return the block that was requested.", source_peer
×
781
                );
782
                Err(CommsInterfaceError::InvalidPeerResponse(format!(
1✔
783
                    "Invalid response from peer `{}`: Peer failed to provide the block that was propagated",
1✔
784
                    source_peer
1✔
785
                )))
1✔
786
            },
787
            Err(CommsInterfaceError::UnexpectedApiResponse) => {
788
                debug!(
×
789
                    target: LOG_TARGET,
×
790
                    "Peer `{}` sent unexpected API response.", source_peer
×
791
                );
792
                Err(CommsInterfaceError::UnexpectedApiResponse)
×
793
            },
794
            Err(e) => Err(e),
×
795
        }
796
    }
4✔
797

798
    /// Handle inbound blocks from remote nodes and local services.
    ///
    /// Hydrates compact inputs, attempts to add the block to the blockchain, publishes
    /// the outcome on the block event stream, and propagates newly accepted blocks to
    /// the network (excluding the peer that sent them).
    ///
    /// ## Arguments
    /// block - the block to store
    /// new_block_msg - propagate this new block message
    /// source_peer - the peer that sent this new block message, or None if the block was generated by a local miner
    pub async fn handle_block(
        &mut self,
        block: Block,
        source_peer: Option<NodeId>,
    ) -> Result<BlockHash, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;

        info!(
            target: LOG_TARGET,
            "Block #{} ({}) received from {}",
            block_height,
            block_hash.to_hex(),
            source_peer
                .as_ref()
                .map(|p| format!("remote peer: {}", p))
                .unwrap_or_else(|| "local services".to_string())
        );
        debug!(target: LOG_TARGET, "Incoming block: {}", block);
        let timer = Instant::now();
        // Fill in full output data for any compact inputs before validation.
        let block = self.hydrate_block(block).await?;

        let add_block_result = self.blockchain_db.add_block(block.clone()).await;
        // Create block event on block event stream
        match add_block_result {
            Ok(block_add_result) => {
                debug!(
                    target: LOG_TARGET,
                    "Block #{} ({}) added ({}) to blockchain in {:.2?}",
                    block_height,
                    block_hash.to_hex(),
                    block_add_result,
                    timer.elapsed()
                );

                // Only relay blocks that changed our chain state; duplicates and
                // orphans are not propagated.
                let should_propagate = match &block_add_result {
                    BlockAddResult::Ok(_) => true,
                    BlockAddResult::BlockExists => false,
                    BlockAddResult::OrphanBlock => false,
                    BlockAddResult::ChainReorg { .. } => true,
                };

                #[cfg(feature = "metrics")]
                self.update_block_result_metrics(&block_add_result).await?;

                self.publish_block_event(BlockEvent::ValidBlockAdded(block.clone(), block_add_result));

                if should_propagate {
                    debug!(
                        target: LOG_TARGET,
                        "Propagate block ({}) to network.",
                        block_hash.to_hex()
                    );
                    // Don't echo the block back to the peer we received it from.
                    let exclude_peers = source_peer.into_iter().collect();
                    let new_block_msg = NewBlock::from(&*block);
                    // Propagation failure is logged but does not fail block handling.
                    if let Err(e) = self.outbound_nci.propagate_block(new_block_msg, exclude_peers).await {
                        warn!(
                            target: LOG_TARGET,
                            "Failed to propagate block ({}) to network: {}.",
                            block_hash.to_hex(), e
                        );
                    }
                }
                Ok(block_hash)
            },

            Err(e @ ChainStorageError::ValidationError { .. }) => {
                #[cfg(feature = "metrics")]
                {
                    let block_hash = block.hash();
                    metrics::rejected_blocks(block.header.height, &block_hash).inc();
                }
                warn!(
                    target: LOG_TARGET,
                    "Peer {} sent an invalid block: {}",
                    source_peer
                        .as_ref()
                        .map(ToString::to_string)
                        .unwrap_or_else(|| "<local request>".to_string()),
                    e
                );
                self.publish_block_event(BlockEvent::AddBlockValidationFailed { block, source_peer });
                Err(e.into())
            },

            Err(e) => {
                // Non-validation storage errors: record and report without blaming the peer.
                #[cfg(feature = "metrics")]
                metrics::rejected_blocks(block.header.height, &block.hash()).inc();

                self.publish_block_event(BlockEvent::AddBlockErrored { block });
                Err(e.into())
            },
        }
    }

    /// Converts any compact inputs in `block` into fully populated inputs by looking
    /// up the outputs they spend in the local database.
    ///
    /// Returns the block unchanged (wrapped in an `Arc`) when it has no inputs.
    /// Errors with `InvalidFullBlock` if a spent output cannot be found locally.
    async fn hydrate_block(&mut self, block: Block) -> Result<Arc<Block>, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;
        if block.body.inputs().is_empty() {
            debug!(
                target: LOG_TARGET,
                "Block #{} ({}) contains no inputs so nothing to hydrate",
                block_height,
                block_hash.to_hex(),
            );
            return Ok(Arc::new(block));
        }

        let timer = Instant::now();
        // Break the block apart so the inputs can be mutated in place.
        let (header, mut inputs, outputs, kernels) = block.dissolve();

        let db = self.blockchain_db.inner().db_read_access()?;
        for input in &mut inputs {
            // Inputs that already carry full output data need no hydration.
            if !input.is_compact() {
                continue;
            }

            // A compact input only references the output it spends by hash; that
            // output must exist in our database for hydration to succeed.
            let output_mined_info =
                db.fetch_output(&input.output_hash())?
                    .ok_or_else(|| CommsInterfaceError::InvalidFullBlock {
                        hash: block_hash,
                        details: format!("Output {} to be spent does not exist in db", input.output_hash()),
                    })?;

            // Outputs without a range proof use a zero hash as placeholder.
            let rp_hash = match output_mined_info.output.proof {
                Some(proof) => proof.hash(),
                None => FixedHash::zero(),
            };
            input.add_output_data(
                output_mined_info.output.version,
                output_mined_info.output.features,
                output_mined_info.output.commitment,
                output_mined_info.output.script,
                output_mined_info.output.sender_offset_public_key,
                output_mined_info.output.covenant,
                output_mined_info.output.encrypted_data,
                output_mined_info.output.metadata_signature,
                rp_hash,
                output_mined_info.output.minimum_value_promise,
            );
        }
        debug!(
            target: LOG_TARGET,
            "Hydrated block #{} ({}) with {} input(s) in {:.2?}",
            block_height,
            block_hash.to_hex(),
            inputs.len(),
            timer.elapsed()
        );
        let block = Block::new(header, AggregateBody::new(inputs, outputs, kernels));
        Ok(Arc::new(block))
    }

    fn publish_block_event(&self, event: BlockEvent) {
32✔
958
        if let Err(event) = self.block_event_sender.send(Arc::new(event)) {
32✔
959
            debug!(target: LOG_TARGET, "No event subscribers. Event {} dropped.", event.0)
×
960
        }
32✔
961
    }
32✔
962

963
    /// Updates node metrics (tip height, target difficulty, UTXO set size, reorg and
    /// orphan counters) according to the result of a block-add operation.
    #[cfg(feature = "metrics")]
    async fn update_block_result_metrics(&self, block_add_result: &BlockAddResult) -> Result<(), CommsInterfaceError> {
        // Record the target-difficulty gauge for the block's PoW algorithm.
        // Values beyond i64 range are clamped to i64::MAX.
        fn update_target_difficulty(block: &ChainBlock) {
            match block.header().pow_algo() {
                PowAlgorithm::Sha3x => {
                    metrics::target_difficulty_sha()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomX => {
                    metrics::target_difficulty_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
            }
        }

        match block_add_result {
            BlockAddResult::Ok(ref block) => {
                update_target_difficulty(block);
                #[allow(clippy::cast_possible_wrap)]
                metrics::tip_height().set(block.height() as i64);
                let utxo_set_size = self.blockchain_db.utxo_count().await?;
                metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
            },
            BlockAddResult::ChainReorg { added, removed } => {
                // `added.last()` is the new tip after the reorg.
                if let Some(fork_height) = added.last().map(|b| b.height()) {
                    #[allow(clippy::cast_possible_wrap)]
                    metrics::tip_height().set(fork_height as i64);
                    metrics::reorg(fork_height, added.len(), removed.len()).inc();

                    let utxo_set_size = self.blockchain_db.utxo_count().await?;
                    metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
                }
                for block in added {
                    update_target_difficulty(block);
                }
            },
            BlockAddResult::OrphanBlock => {
                metrics::orphaned_blocks().inc();
            },
            // BlockExists: nothing changed, nothing to record.
            _ => {},
        }
        Ok(())
    }

    async fn get_target_difficulty_for_next_block(
3✔
1008
        &self,
3✔
1009
        pow_algo: PowAlgorithm,
3✔
1010
        constants: &ConsensusConstants,
3✔
1011
        current_block_hash: HashOutput,
3✔
1012
    ) -> Result<Difficulty, CommsInterfaceError> {
3✔
1013
        let target_difficulty = self
3✔
1014
            .blockchain_db
3✔
1015
            .fetch_target_difficulty_for_next_block(pow_algo, current_block_hash)
3✔
1016
            .await?;
3✔
1017

1018
        let target = target_difficulty.calculate(
3✔
1019
            constants.min_pow_difficulty(pow_algo),
3✔
1020
            constants.max_pow_difficulty(pow_algo),
3✔
1021
        );
3✔
1022
        debug!(target: LOG_TARGET, "Target difficulty {} for PoW {}", target, pow_algo);
3✔
1023
        Ok(target)
3✔
1024
    }
3✔
1025

1026
    pub async fn get_last_seen_hash(&self) -> Result<FixedHash, CommsInterfaceError> {
×
1027
        self.mempool.get_last_seen_hash().await.map_err(|e| e.into())
×
1028
    }
×
1029
}
1030

1031
impl<B> Clone for InboundNodeCommsHandlers<B> {
1032
    fn clone(&self) -> Self {
204✔
1033
        Self {
204✔
1034
            block_event_sender: self.block_event_sender.clone(),
204✔
1035
            blockchain_db: self.blockchain_db.clone(),
204✔
1036
            mempool: self.mempool.clone(),
204✔
1037
            consensus_manager: self.consensus_manager.clone(),
204✔
1038
            list_of_reconciling_blocks: self.list_of_reconciling_blocks.clone(),
204✔
1039
            outbound_nci: self.outbound_nci.clone(),
204✔
1040
            connectivity: self.connectivity.clone(),
204✔
1041
            randomx_factory: self.randomx_factory.clone(),
204✔
1042
        }
204✔
1043
    }
204✔
1044
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc