• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tari-project / tari / 17581942656

09 Sep 2025 12:03PM UTC coverage: 60.905% (-0.01%) from 60.917%
17581942656

push

github

web-flow
docs: http api documentation (#7484)

Description
---

Documentation for the various API calls for HTTP.

Motivation and Context
---

Provide adequate documentation for the use of the HTTP API calls.

How Has This Been Tested?
---
All commands have been run on actual data, aside from
`sync_utxos_by_block`.

What process can a PR reviewer use to test or verify this change?
---

Breaking Changes
---

- [x] None
- [ ] Requires data directory on base node to be deleted
- [ ] Requires hard fork
- [ ] Other - Please specify


<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

- Documentation
  - Added a comprehensive HTTP API guide for the Minotari Base Node.
- Describes base URL and default port, plus eight endpoints with
methods, paths, parameters, and response schemas.
- Includes detailed JSON and curl examples, testing instructions, and
notes on hex-encoded byte fields and nested structures.
- Documents pagination for syncing UTXOs by block and states that no
authentication is required.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->

73900 of 121337 relevant lines covered (60.9%)

295677.85 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
1
// Copyright 2019. The Tari Project
2
//
3
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
4
// following conditions are met:
5
//
6
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
7
// disclaimer.
8
//
9
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
10
// following disclaimer in the documentation and/or other materials provided with the distribution.
11
//
12
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
13
// products derived from this software without specific prior written permission.
14
//
15
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
16
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
19
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
20
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
21
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22

23
#[cfg(feature = "metrics")]
24
use std::convert::{TryFrom, TryInto};
25
use std::{cmp::max, collections::HashSet, sync::Arc, time::Instant};
26

27
use log::*;
28
use strum_macros::Display;
29
use tari_common_types::types::{BlockHash, FixedHash, HashOutput};
30
use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId};
31
use tari_node_components::blocks::{
32
    Block,
33
    BlockBuilder,
34
    BlockHeader,
35
    BlockHeaderValidationError,
36
    ChainBlock,
37
    NewBlock,
38
    NewBlockTemplate,
39
};
40
use tari_transaction_components::{
41
    aggregated_body::AggregateBody,
42
    consensus::ConsensusConstants,
43
    tari_proof_of_work::{Difficulty, PowAlgorithm, PowError},
44
};
45
use tari_utilities::hex::Hex;
46
use tokio::sync::RwLock;
47

48
#[cfg(feature = "metrics")]
49
use crate::base_node::metrics;
50
use crate::{
51
    base_node::comms_interface::{
52
        comms_response::ValidatorNodeChange,
53
        error::CommsInterfaceError,
54
        local_interface::BlockEventSender,
55
        FetchMempoolTransactionsResponse,
56
        NodeCommsRequest,
57
        NodeCommsResponse,
58
        OutboundNodeCommsInterface,
59
    },
60
    chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError},
61
    consensus::BaseNodeConsensusManager,
62
    mempool::Mempool,
63
    proof_of_work::{
64
        cuckaroo_pow::cuckaroo_difficulty,
65
        monero_randomx_difficulty,
66
        randomx_factory::RandomXFactory,
67
        sha3x_difficulty,
68
        tari_randomx_difficulty,
69
    },
70
    validation::{helpers, tari_rx_vm_key_height, ValidationError},
71
};
72

73
/// Log target used by all log macros in this module.
const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler";
/// Maximum number of block hashes accepted in a single `FetchHeadersByHashes` request.
const MAX_REQUEST_BY_BLOCK_HASHES: usize = 100;
/// Maximum number of kernel excess signatures accepted in a single
/// `FetchBlocksByKernelExcessSigs` request.
const MAX_REQUEST_BY_KERNEL_EXCESS_SIGS: usize = 100;
/// Maximum number of commitments accepted in a single `FetchBlocksByUtxos` request.
const MAX_REQUEST_BY_UTXO_HASHES: usize = 100;
/// Maximum time (milliseconds) `GetNewBlockTemplate` will wait for the mempool to
/// catch up with the chain tip before returning a potentially stale template.
const MAX_MEMPOOL_TIMEOUT: u64 = 150;
// NOTE(review): not referenced in this visible chunk — presumably a lag (in blocks)
// used for difficulty indicators elsewhere in the file; confirm before relying on it.
const DIFF_INDICATOR_LAG: u64 = 25;
79

80
/// Events that can be published on the Validated Block Event Stream
/// Broadcast is to notify subscribers if this is a valid propagated block event
#[derive(Debug, Clone, Display)]
pub enum BlockEvent {
    /// A block passed validation and was added; `BlockAddResult` describes how it was added.
    ValidBlockAdded(Arc<Block>, BlockAddResult),
    /// A block failed validation; `source_peer` is the peer it came from, if known.
    AddBlockValidationFailed {
        block: Arc<Block>,
        source_peer: Option<NodeId>,
    },
    /// An error occurred while attempting to add the block.
    AddBlockErrored {
        block: Arc<Block>,
    },
    /// Block sync completed at the given chain block.
    // NOTE(review): the meaning of the u64 payload is not visible in this chunk —
    // presumably a height or block count; confirm at the publish site.
    BlockSyncComplete(Arc<ChainBlock>, u64),
    /// The chain was rewound during block sync; the payload carries the affected blocks.
    BlockSyncRewind(Vec<Arc<ChainBlock>>),
}
95

96
/// The InboundNodeCommsInterface is used to handle all received inbound requests from remote nodes.
pub struct InboundNodeCommsHandlers<B> {
    /// Channel on which validated `BlockEvent`s are published to subscribers.
    block_event_sender: BlockEventSender,
    /// Async handle to blockchain storage.
    blockchain_db: AsyncBlockchainDb<B>,
    /// Handle to the mempool service.
    mempool: Mempool,
    /// Provider of consensus constants and chain rules (per-height).
    consensus_manager: BaseNodeConsensusManager,
    /// Hashes of blocks currently being reconciled. Shared behind a lock so the same
    /// block received from multiple peers near-simultaneously is only reconciled once.
    list_of_reconciling_blocks: Arc<RwLock<HashSet<HashOutput>>>,
    /// Interface for making outbound requests to remote base nodes.
    outbound_nci: OutboundNodeCommsInterface,
    /// Handle to the comms connectivity manager.
    connectivity: ConnectivityRequester,
    /// Factory for RandomX VMs, used when computing RandomX achieved difficulty.
    randomx_factory: RandomXFactory,
}
107

108
impl<B> InboundNodeCommsHandlers<B>
109
where B: BlockchainBackend + 'static
110
{
111
    /// Construct a new InboundNodeCommsInterface.
112
    pub fn new(
×
113
        block_event_sender: BlockEventSender,
×
114
        blockchain_db: AsyncBlockchainDb<B>,
×
115
        mempool: Mempool,
×
116
        consensus_manager: BaseNodeConsensusManager,
×
117
        outbound_nci: OutboundNodeCommsInterface,
×
118
        connectivity: ConnectivityRequester,
×
119
        randomx_factory: RandomXFactory,
×
120
    ) -> Self {
×
121
        Self {
×
122
            block_event_sender,
×
123
            blockchain_db,
×
124
            mempool,
×
125
            consensus_manager,
×
126
            list_of_reconciling_blocks: Arc::new(RwLock::new(HashSet::new())),
×
127
            outbound_nci,
×
128
            connectivity,
×
129
            randomx_factory,
×
130
        }
×
131
    }
×
132

133
    /// Handle inbound node comms requests from remote nodes and local services.
    ///
    /// Each `NodeCommsRequest` variant is dispatched to the blockchain DB, mempool or
    /// consensus manager as appropriate and mapped onto the matching `NodeCommsResponse`
    /// variant. Storage and mempool errors are propagated as `CommsInterfaceError` via `?`.
    /// Batched fetch requests are bounded by the `MAX_REQUEST_BY_*` constants to limit
    /// the work a single peer request can cause.
    #[allow(clippy::too_many_lines)]
    pub async fn handle_request(&self, request: NodeCommsRequest) -> Result<NodeCommsResponse, CommsInterfaceError> {
        trace!(target: LOG_TARGET, "Handling remote request {request}");
        match request {
            NodeCommsRequest::GetChainMetadata => Ok(NodeCommsResponse::ChainMetadata(
                self.blockchain_db.get_chain_metadata().await?,
            )),
            NodeCommsRequest::GetTargetDifficultyNextBlock(algo) => {
                // Target difficulty is computed for a block that would follow the current tip.
                let header = self.blockchain_db.fetch_tip_header().await?;
                let constants = self.consensus_manager.consensus_constants(header.header().height);
                let target_difficulty = self
                    .get_target_difficulty_for_next_block(algo, constants, *header.hash())
                    .await?;
                Ok(NodeCommsResponse::TargetDifficulty(target_difficulty))
            },
            NodeCommsRequest::FetchHeaders(range) => {
                let headers = self.blockchain_db.fetch_chain_headers(range).await?;
                Ok(NodeCommsResponse::BlockHeaders(headers))
            },
            NodeCommsRequest::FetchHeadersByHashes(block_hashes) => {
                // Reject over-sized batches before touching storage.
                if block_hashes.len() > MAX_REQUEST_BY_BLOCK_HASHES {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchHeadersByHashes",
                        details: format!(
                            "Exceeded maximum block hashes request (max: {}, got:{})",
                            MAX_REQUEST_BY_BLOCK_HASHES,
                            block_hashes.len()
                        ),
                    });
                }
                let mut block_headers = Vec::with_capacity(block_hashes.len());
                for block_hash in block_hashes {
                    let block_hex = block_hash.to_hex();
                    match self.blockchain_db.fetch_chain_header_by_block_hash(block_hash).await? {
                        Some(block_header) => {
                            block_headers.push(block_header);
                        },
                        // Any missing header fails the whole request (all-or-nothing).
                        None => {
                            error!(target: LOG_TARGET, "Could not fetch headers with hashes:{block_hex}");
                            return Err(CommsInterfaceError::InternalError(format!(
                                "Could not fetch headers with hashes:{block_hex}"
                            )));
                        },
                    }
                }
                Ok(NodeCommsResponse::BlockHeaders(block_headers))
            },
            NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => {
                // Only unspent outputs are returned; spent ones are silently filtered out.
                let mut res = Vec::with_capacity(utxo_hashes.len());
                for (output, spent) in (self
                    .blockchain_db
                    .fetch_outputs_with_spend_status_at_tip(utxo_hashes)
                    .await?)
                    .into_iter()
                    .flatten()
                {
                    if !spent {
                        res.push(output);
                    }
                }
                Ok(NodeCommsResponse::TransactionOutputs(res))
            },
            NodeCommsRequest::FetchMatchingBlocks { range, compact } => {
                let blocks = self.blockchain_db.fetch_blocks(range, compact).await?;
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::FetchBlocksByKernelExcessSigs(excess_sigs) => {
                if excess_sigs.len() > MAX_REQUEST_BY_KERNEL_EXCESS_SIGS {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchBlocksByKernelExcessSigs",
                        details: format!(
                            "Exceeded maximum number of kernel excess sigs in request (max: {}, got:{})",
                            MAX_REQUEST_BY_KERNEL_EXCESS_SIGS,
                            excess_sigs.len()
                        ),
                    });
                }
                // Best-effort lookup: misses and storage errors are logged and skipped,
                // not propagated, so the peer gets whatever blocks could be found.
                let mut blocks = Vec::with_capacity(excess_sigs.len());
                for sig in excess_sigs {
                    let sig_hex = sig.get_signature().to_hex();
                    debug!(
                        target: LOG_TARGET,
                        "A peer has requested a block with kernel with sig {sig_hex}"
                    );
                    match self.blockchain_db.fetch_block_with_kernel(sig).await {
                        Ok(Some(block)) => blocks.push(block),
                        Ok(None) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block containing kernel with sig {sig_hex} to peer because not \
                             stored"
                        ),
                        Err(e) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block containing kernel with sig {sig_hex} to peer because: {e}"
                        ),
                    }
                }
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::FetchBlocksByUtxos(commitments) => {
                if commitments.len() > MAX_REQUEST_BY_UTXO_HASHES {
                    return Err(CommsInterfaceError::InvalidRequest {
                        request: "FetchBlocksByUtxos",
                        details: format!(
                            "Exceeded maximum number of utxo hashes in request (max: {}, got:{})",
                            MAX_REQUEST_BY_UTXO_HASHES,
                            commitments.len()
                        ),
                    });
                }
                // Same best-effort semantics as FetchBlocksByKernelExcessSigs above.
                let mut blocks = Vec::with_capacity(commitments.len());
                for commitment in commitments {
                    let commitment_hex = commitment.to_hex();
                    debug!(
                        target: LOG_TARGET,
                        "A peer has requested a block with commitment {commitment_hex}",
                    );
                    match self.blockchain_db.fetch_block_with_utxo(commitment).await {
                        Ok(Some(block)) => blocks.push(block),
                        Ok(None) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block with commitment {commitment_hex} because not stored"
                        ),
                        Err(e) => warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block with commitment {commitment_hex} because: {e}"
                        ),
                    }
                }
                Ok(NodeCommsResponse::HistoricalBlocks(blocks))
            },
            NodeCommsRequest::GetHeaderByHash(hash) => {
                let header = self.blockchain_db.fetch_chain_header_by_block_hash(hash).await?;
                Ok(NodeCommsResponse::BlockHeader(header))
            },
            NodeCommsRequest::GetBlockByHash(hash) => {
                // `false` requests the non-compact (full) form of the block.
                let block = self.blockchain_db.fetch_block_by_hash(hash, false).await?;
                Ok(NodeCommsResponse::HistoricalBlock(Box::new(block)))
            },
            NodeCommsRequest::GetNewBlockTemplate(request) => {
                let best_block_header = self.blockchain_db.fetch_tip_header().await?;
                let mut last_seen_hash = self.mempool.get_last_seen_hash().await?;
                let mut is_mempool_synced = false;
                let start = Instant::now();
                // this will wait a max of 150ms by default before returning anyway with a potential broken template
                // We need to ensure the mempool has seen the latest base node height before we can be confident the
                // template is correct
                while !is_mempool_synced && start.elapsed().as_millis() < MAX_MEMPOOL_TIMEOUT.into() {
                    // A default (zero) last-seen hash is treated as "nothing to wait for".
                    if best_block_header.hash() == &last_seen_hash || last_seen_hash == FixedHash::default() {
                        is_mempool_synced = true;
                    } else {
                        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
                        last_seen_hash = self.mempool.get_last_seen_hash().await?;
                    }
                }

                if !is_mempool_synced {
                    warn!(
                        target: LOG_TARGET,
                        "Mempool out of sync - last seen hash '{}' does not match the tip hash '{}'. This condition \
                         should auto correct with the next block template request",
                        last_seen_hash, best_block_header.hash()
                    );
                }
                // Build the next header on top of the tip with the requested PoW algorithm.
                let mut header = BlockHeader::from_previous(best_block_header.header());
                let constants = self.consensus_manager.consensus_constants(header.height);
                header.version = constants.blockchain_version().into();
                header.pow.pow_algo = request.algo;

                // Clamp the requested weight to consensus limits; 0 means "use the maximum".
                let constants_weight = constants.max_block_transaction_weight();
                let asking_weight = if request.max_weight > constants_weight || request.max_weight == 0 {
                    constants_weight
                } else {
                    request.max_weight
                };

                debug!(
                    target: LOG_TARGET,
                    "Fetching transactions with a maximum weight of {asking_weight} for the template"
                );
                // Unwrap each Arc if we hold the only reference, otherwise clone the tx.
                let transactions = self
                    .mempool
                    .retrieve(asking_weight)
                    .await?
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect::<Vec<_>>();

                debug!(
                    target: LOG_TARGET,
                    "Adding {} transaction(s) to new block template",
                    transactions.len(),
                );

                // Copy these out before `header` is consumed by `into_builder()`.
                let prev_hash = header.prev_hash;
                let height = header.height;

                let block = header.into_builder().with_transactions(transactions).build();
                let block_hash = block.hash();
                let block_template = NewBlockTemplate::from_block(
                    block,
                    self.get_target_difficulty_for_next_block(request.algo, constants, prev_hash)
                        .await?,
                    self.consensus_manager.get_block_reward_at(height),
                    is_mempool_synced,
                )?;

                debug!(target: LOG_TARGET,
                    "New block template requested and prepared at height: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
                    block_template.header.height,
                    block_template.target_difficulty,
                    block_hash.to_hex(),
                    block_template
                        .body
                        .calculate_weight(constants.transaction_weight_params())
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
                    block_template.body.to_counts_string()
                );

                Ok(NodeCommsResponse::NewBlockTemplate(block_template))
            },
            NodeCommsRequest::GetNewBlock(block_template) => {
                let height = block_template.header.height;
                let target_difficulty = block_template.target_difficulty;
                let block = self.blockchain_db.prepare_new_block(block_template).await?;
                let constants = self.consensus_manager.consensus_constants(block.header.height);
                debug!(target: LOG_TARGET,
                    "Prepared block: #{}, target difficulty: {}, block hash: `{}`, weight: {}, {}",
                    height,
                    target_difficulty,
                    block.hash().to_hex(),
                    block
                        .body
                        .calculate_weight(constants.transaction_weight_params())
                        .map_err(|e| CommsInterfaceError::InternalError(e.to_string()))?,
                    block.body.to_counts_string()
                );
                Ok(NodeCommsResponse::NewBlock {
                    success: true,
                    error: None,
                    block: Some(block),
                })
            },
            NodeCommsRequest::GetBlockFromAllChains(hash) => {
                let block_hex = hash.to_hex();
                debug!(
                    target: LOG_TARGET,
                    "A peer has requested a block with hash {block_hex}"
                );

                // Check the main chain first; fall back to the orphan pool. Storage
                // errors at either stage are logged and treated as "not found".
                #[allow(clippy::blocks_in_conditions)]
                let maybe_block = match self
                    .blockchain_db
                    .fetch_block_by_hash(hash, true)
                    .await
                    .unwrap_or_else(|e| {
                        warn!(
                            target: LOG_TARGET,
                            "Could not provide requested block {block_hex} to peer because: {e}",
                        );

                        None
                    }) {
                    None => self.blockchain_db.fetch_orphan(hash).await.map_or_else(
                        |e| {
                            warn!(
                                target: LOG_TARGET,
                                "Could not provide requested block {block_hex} to peer because: {e}"
                            );

                            None
                        },
                        Some,
                    ),
                    Some(block) => Some(block.into_block()),
                };

                Ok(NodeCommsResponse::Block(Box::new(maybe_block)))
            },
            NodeCommsRequest::FetchKernelByExcessSig(signature) => {
                // Response carries zero or one kernel; a storage error is propagated.
                let kernels = match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await {
                    Ok(Some((kernel, _))) => vec![kernel],
                    Ok(None) => vec![],
                    Err(err) => {
                        error!(target: LOG_TARGET, "Could not fetch kernel {err}");
                        return Err(err.into());
                    },
                };

                Ok(NodeCommsResponse::TransactionKernels(kernels))
            },
            NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs } => {
                let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
                Ok(NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
                    FetchMempoolTransactionsResponse {
                        transactions,
                        not_found,
                    },
                ))
            },
            NodeCommsRequest::FetchValidatorNodesKeys {
                height,
                validator_network,
            } => {
                let active_validator_nodes = self
                    .blockchain_db
                    .fetch_active_validator_nodes(height, validator_network)
                    .await?;
                Ok(NodeCommsResponse::FetchValidatorNodesKeysResponse(
                    active_validator_nodes,
                ))
            },
            NodeCommsRequest::GetValidatorNode {
                sidechain_id,
                public_key,
            } => {
                let vn = self.blockchain_db.get_validator_node(sidechain_id, public_key).await?;
                Ok(NodeCommsResponse::GetValidatorNode(vn))
            },
            NodeCommsRequest::FetchTemplateRegistrations {
                start_height,
                end_height,
            } => {
                // Inclusive height range.
                let template_registrations = self
                    .blockchain_db
                    .fetch_template_registrations(start_height..=end_height)
                    .await?;
                Ok(NodeCommsResponse::FetchTemplateRegistrationsResponse(
                    template_registrations,
                ))
            },
            NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash } => {
                let utxos = self.blockchain_db.fetch_outputs_in_block(block_hash).await?;
                Ok(NodeCommsResponse::TransactionOutputs(utxos))
            },
            NodeCommsRequest::FetchMinedInfoByPayRef(payref) => {
                let output_info = self.blockchain_db.fetch_mined_info_by_payref(payref).await?;
                Ok(NodeCommsResponse::MinedInfo(output_info))
            },
            NodeCommsRequest::FetchMinedInfoByOutputHash(output_hash) => {
                let output_info = self.blockchain_db.fetch_mined_info_by_output_hash(output_hash).await?;
                Ok(NodeCommsResponse::MinedInfo(output_info))
            },
            NodeCommsRequest::FetchOutputMinedInfo(output_hash) => {
                let output_info = self.blockchain_db.fetch_output(output_hash).await?;
                Ok(NodeCommsResponse::OutputMinedInfo(output_info))
            },
            NodeCommsRequest::CheckOutputSpentStatus(output_hash) => {
                // An output is spent iff it appears as an input; hence the input lookup.
                let input_info = self.blockchain_db.fetch_input(output_hash).await?;
                Ok(NodeCommsResponse::InputMinedInfo(input_info))
            },
            NodeCommsRequest::FetchValidatorNodeChanges { epoch, sidechain_id } => {
                let added_validators = self
                    .blockchain_db
                    .fetch_validators_activating_in_epoch(sidechain_id.clone(), epoch)
                    .await?;

                let exit_validators = self
                    .blockchain_db
                    .fetch_validators_exiting_in_epoch(sidechain_id.clone(), epoch)
                    .await?;

                info!(
                    target: LOG_TARGET,
                    "Fetched {} validators activating and {} validators exiting in epoch {}",
                    added_validators.len(),
                    exit_validators.len(),
                    epoch,
                );

                // Merge activations and exits into a single ordered change list:
                // all Adds first, then all Removes.
                let mut node_changes = Vec::with_capacity(added_validators.len() + exit_validators.len());

                node_changes.extend(added_validators.into_iter().map(|vn| ValidatorNodeChange::Add {
                    registration: vn.original_registration.into(),
                    activation_epoch: vn.activation_epoch,
                    minimum_value_promise: vn.minimum_value_promise,
                    shard_key: vn.shard_key,
                }));

                node_changes.extend(exit_validators.into_iter().map(|vn| ValidatorNodeChange::Remove {
                    public_key: vn.public_key,
                }));

                Ok(NodeCommsResponse::FetchValidatorNodeChangesResponse(node_changes))
            },
        }
    }
521

522
    /// Handles a `NewBlock` message. Only a single `NewBlock` message can be handled at once to prevent extraneous
    /// requests for the full block.
    /// This may (asynchronously) block until the other request(s) complete or time out and so should typically be
    /// executed in a dedicated task.
    ///
    /// Flow: cheap rejection checks (add-block disabled, block already known/bad, minimum
    /// difficulty), then registration of the block hash in the shared reconciling set,
    /// then the actual reconcile-and-add, and finally removal from the set — the hash is
    /// removed even if reconciliation failed, so the error is only propagated afterwards.
    pub async fn handle_new_block_message(
        &mut self,
        new_block: NewBlock,
        source_peer: NodeId,
    ) -> Result<(), CommsInterfaceError> {
        let block_hash = new_block.header.hash();

        // Silently drop block messages while add_block is administratively disabled.
        if self.blockchain_db.inner().is_add_block_disabled() {
            info!(
                target: LOG_TARGET,
                "Ignoring block message ({}) because add_block is locked",
                block_hash.to_hex()
            );
            return Ok(());
        }

        // Lets check if the block exists before we try and ask for a complete block
        if self.check_exists_and_not_bad_block(block_hash).await? {
            return Ok(());
        }

        // lets check that the difficulty at least matches 50% of the tip header. The max difficulty drop is 16%, thus
        // 50% is way more than that and in order to attack the node, you need 50% of the mining power. We cannot check
        // the target difficulty as orphan blocks dont have a target difficulty. All we care here is that bad
        // blocks are not free to make, and that they are more expensive to make then they are to validate. As
        // soon as a block can be linked to the main chain, a proper full proof of work check will
        // be done before any other validation.
        self.check_min_block_difficulty(&new_block).await?;

        {
            // we use a double lock to make sure we can only reconcile one unique block at a time. We may receive the
            // same block from multiple peer near simultaneously. We should only reconcile each unique block once.
            // First pass: read lock only, so the common "already reconciling" case
            // does not contend with writers.
            let read_lock = self.list_of_reconciling_blocks.read().await;
            if read_lock.contains(&block_hash) {
                debug!(
                    target: LOG_TARGET,
                    "Block with hash `{}` is already being reconciled",
                    block_hash.to_hex()
                );
                return Ok(());
            }
        }
        {
            // Second pass under the write lock: re-check existence (another task may have
            // added the block between the two locks), then claim the hash via insert().
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
            if self.check_exists_and_not_bad_block(block_hash).await? {
                return Ok(());
            }

            // insert() returning false means another task claimed this hash first.
            if !write_lock.insert(block_hash) {
                debug!(
                    target: LOG_TARGET,
                    "Block with hash `{}` is already being reconciled",
                    block_hash.to_hex()
                );
                return Ok(());
            }
        }

        debug!(
            target: LOG_TARGET,
            "Block with hash `{}` is unknown. Constructing block from known mempool transactions / requesting missing \
             transactions from peer '{}'.",
            block_hash.to_hex(),
            source_peer
        );

        let result = self.reconcile_and_add_block(source_peer.clone(), new_block).await;

        {
            // Always release the hash from the reconciling set, success or failure.
            let mut write_lock = self.list_of_reconciling_blocks.write().await;
            write_lock.remove(&block_hash);
        }
        // Propagate any reconcile error only after cleanup above.
        result?;
        Ok(())
    }
601

602
    /// Cheap proof-of-work sanity gate for a propagated block, applied before any expensive
    /// validation or reconciliation.
    ///
    /// The minimum difficulty is the larger of:
    /// - the consensus minimum for the block's PoW algorithm, and
    /// - half the target difficulty of the most recent chain header that used the *same*
    ///   PoW algorithm (found by walking backwards from the tip).
    ///
    /// Returns `Err(CommsInterfaceError::InvalidBlockHeader)` if the achieved difficulty is
    /// below that minimum; `Ok(())` otherwise.
    async fn check_min_block_difficulty(&self, new_block: &NewBlock) -> Result<(), CommsInterfaceError> {
        let constants = self.consensus_manager.consensus_constants(new_block.header.height);
        let gen_hash = *self.consensus_manager.get_genesis_block().hash();
        // Start from the consensus floor for this algorithm; may be raised below.
        let mut min_difficulty = constants.min_pow_difficulty(new_block.header.pow.pow_algo);
        let mut header = self.blockchain_db.fetch_last_chain_header().await?;
        // Walk backwards from the tip until we find a header mined with the same PoW algorithm
        // (or reach the genesis block at height 0).
        loop {
            if new_block.header.pow_algo() == header.header().pow_algo() {
                // Raise the floor to half of that header's target difficulty, if that is higher.
                // `checked_div_u64` keeps the existing floor if the division cannot be performed.
                min_difficulty = max(
                    header
                        .accumulated_data()
                        .target_difficulty
                        .checked_div_u64(2)
                        .unwrap_or(min_difficulty),
                    min_difficulty,
                );
                break;
            }
            if header.height() == 0 {
                break;
            }
            // we have not reached gen block, and the pow algo does not match, so lets go further back
            header = self
                .blockchain_db
                .fetch_chain_header(header.height().saturating_sub(1))
                .await?;
        }
        // Compute the difficulty actually achieved by the new block's header, per algorithm.
        let achieved = match new_block.header.pow_algo() {
            PowAlgorithm::RandomXM => monero_randomx_difficulty(
                &new_block.header,
                &self.randomx_factory,
                &gen_hash,
                &self.consensus_manager,
            )?,
            PowAlgorithm::Sha3x => sha3x_difficulty(&new_block.header)?,
            PowAlgorithm::RandomXT => {
                // NOTE(review): the VM key height is derived from `header` — the matching-algo
                // header located by the loop above (or genesis) — not from the new block's height.
                let vm_key = *self
                    .blockchain_db
                    .fetch_chain_header(tari_rx_vm_key_height(header.height()))
                    .await?
                    .hash();
                tari_randomx_difficulty(&new_block.header, &self.randomx_factory, &vm_key)?
            },
            PowAlgorithm::Cuckaroo => {
                let constants = self.consensus_manager.consensus_constants(new_block.header.height);
                let cuckaroo_cycle = constants.cuckaroo_cycle_length();
                let edge_bits = constants.cuckaroo_edge_bits();
                cuckaroo_difficulty(&new_block.header, cuckaroo_cycle, edge_bits)?
            },
        };
        if achieved < min_difficulty {
            return Err(CommsInterfaceError::InvalidBlockHeader(
                BlockHeaderValidationError::ProofOfWorkError(PowError::AchievedDifficultyBelowMin),
            ));
        }
        Ok(())
    }
×
658

659
    async fn check_exists_and_not_bad_block(&self, block: FixedHash) -> Result<bool, CommsInterfaceError> {
×
660
        if self.blockchain_db.chain_header_or_orphan_exists(block).await? {
×
661
            debug!(
×
662
                target: LOG_TARGET,
×
663
                "Block with hash `{}` already stored",
×
664
                block.to_hex()
×
665
            );
666
            return Ok(true);
×
667
        }
×
668
        let (is_bad_block, reason) = self.blockchain_db.bad_block_exists(block).await?;
×
669
        if is_bad_block {
×
670
            debug!(
×
671
                target: LOG_TARGET,
×
672
                "Block with hash `{}` already validated as a bad block due to `{}`",
×
673
                block.to_hex(), reason
×
674
            );
675
            return Err(CommsInterfaceError::ChainStorageError(
×
676
                ChainStorageError::ValidationError {
×
677
                    source: ValidationError::BadBlockFound {
×
678
                        hash: block.to_hex(),
×
679
                        reason,
×
680
                    },
×
681
                },
×
682
            ));
×
683
        }
×
684
        Ok(false)
×
685
    }
×
686

687
    async fn reconcile_and_add_block(
×
688
        &mut self,
×
689
        source_peer: NodeId,
×
690
        new_block: NewBlock,
×
691
    ) -> Result<(), CommsInterfaceError> {
×
692
        let block = self.reconcile_block(source_peer.clone(), new_block).await?;
×
693
        self.handle_block(block, Some(source_peer)).await?;
×
694
        Ok(())
×
695
    }
×
696

697
    /// Reconstruct a full [`Block`] from a compact `NewBlock` message.
    ///
    /// Strategy, in order:
    /// 1. If the block carries no non-coinbase kernel excess signatures, build it directly from
    ///    the coinbase parts already in the message.
    /// 2. If the block does not build on our current tip, fall back to fetching the full block
    ///    from the peer (mempool-based reconstruction is only valid on the tip).
    /// 3. Otherwise, pull known transactions from the mempool, request any missing ones from the
    ///    peer, and verify the reconstruction by recomputing MMR roots against the header. Any
    ///    failure along the way falls back to requesting the full block from the peer.
    #[allow(clippy::too_many_lines)]
    async fn reconcile_block(
        &mut self,
        source_peer: NodeId,
        new_block: NewBlock,
    ) -> Result<Block, CommsInterfaceError> {
        let NewBlock {
            header,
            coinbase_kernels,
            coinbase_outputs,
            kernel_excess_sigs: excess_sigs,
        } = new_block;
        // If the block is empty, we dont have to ask for the block, as we already have the full block available
        // to us.
        if excess_sigs.is_empty() {
            let block = BlockBuilder::new(header.version)
                .add_outputs(coinbase_outputs)
                .add_kernels(coinbase_kernels)
                .with_header(header)
                .build();
            return Ok(block);
        }

        let block_hash = header.hash();
        // We check the current tip and orphan status of the block because we cannot guarantee that mempool state is
        // correct and the mmr root calculation is only valid if the block is building on the tip.
        let current_meta = self.blockchain_db.get_chain_metadata().await?;
        if header.prev_hash != *current_meta.best_block_hash() {
            debug!(
                target: LOG_TARGET,
                "Orphaned block #{}: ({}), current tip is: #{} ({}). We need to fetch the complete block from peer: \
                 ({})",
                header.height,
                block_hash.to_hex(),
                current_meta.best_block_height(),
                current_meta.best_block_hash().to_hex(),
                source_peer,
            );
            // Every excess sig counts as a miss since we bypass the mempool entirely here.
            #[allow(clippy::cast_possible_wrap)]
            #[cfg(feature = "metrics")]
            metrics::compact_block_tx_misses(header.height).set(excess_sigs.len() as i64);
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        // We know that the block is neither and orphan or a coinbase, so lets ask our mempool for the transactions
        let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(excess_sigs).await?;
        // Unwrap the Arc-wrapped mempool transactions into owned values for the block builder.
        let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect();

        #[allow(clippy::cast_possible_wrap)]
        #[cfg(feature = "metrics")]
        metrics::compact_block_tx_misses(header.height).set(missing_excess_sigs.len() as i64);

        let mut builder = BlockBuilder::new(header.version)
            .add_outputs(coinbase_outputs)
            .add_kernels(coinbase_kernels)
            .with_transactions(known_transactions);

        if missing_excess_sigs.is_empty() {
            debug!(
                target: LOG_TARGET,
                "All transactions for block #{} ({}) found in mempool",
                header.height,
                block_hash.to_hex()
            );
        } else {
            debug!(
                target: LOG_TARGET,
                "Requesting {} unknown transaction(s) from peer '{}'.",
                missing_excess_sigs.len(),
                source_peer
            );

            let FetchMempoolTransactionsResponse {
                transactions,
                not_found,
            } = self
                .outbound_nci
                .request_transactions_by_excess_sig(source_peer.clone(), missing_excess_sigs)
                .await?;

            // Add returned transactions to unconfirmed pool
            if !transactions.is_empty() {
                self.mempool.insert_all(transactions.clone()).await?;
            }

            if !not_found.is_empty() {
                warn!(
                    target: LOG_TARGET,
                    "Peer {} was not able to return all transactions for block #{} ({}). {} transaction(s) not found. \
                     Requesting full block.",
                    source_peer,
                    header.height,
                    block_hash.to_hex(),
                    not_found.len()
                );

                #[cfg(feature = "metrics")]
                metrics::compact_block_full_misses(header.height).inc();
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            }

            // Unwrap each Arc if we hold the only reference; otherwise clone the transaction.
            builder = builder.with_transactions(
                transactions
                    .into_iter()
                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
                    .collect(),
            );
        }

        // NB: Add the header last because `with_transactions` etc updates the current header, but we have the final one
        // already
        builder = builder.with_header(header.clone());
        let block = builder.build();

        // Perform a sanity check on the reconstructed block, if the MMR roots don't match then it's possible one or
        // more transactions in our mempool had the same excess/signature for a *different* transaction.
        // This is extremely unlikely, but still possible. In case of a mismatch, request the full block from the peer.
        let (block, mmr_roots) = match self.blockchain_db.calculate_mmr_roots(block).await {
            Err(_) => {
                // Root calculation itself failed — fall back to the full block.
                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
                return Ok(block);
            },
            Ok(v) => v,
        };
        if let Err(e) = helpers::check_mmr_roots(&header, &mmr_roots) {
            warn!(
                target: LOG_TARGET,
                "Reconstructed block #{} ({}) failed MMR check validation!. Requesting full block. Error: {}",
                header.height,
                block_hash.to_hex(),
                e,
            );

            #[cfg(feature = "metrics")]
            metrics::compact_block_mmr_mismatch(header.height).inc();
            let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
            return Ok(block);
        }

        Ok(block)
    }
×
840

841
    async fn request_full_block_from_peer(
×
842
        &mut self,
×
843
        source_peer: NodeId,
×
844
        block_hash: BlockHash,
×
845
    ) -> Result<Block, CommsInterfaceError> {
×
846
        match self
×
847
            .outbound_nci
×
848
            .request_blocks_by_hashes_from_peer(block_hash, Some(source_peer.clone()))
×
849
            .await
×
850
        {
851
            Ok(Some(block)) => Ok(block),
×
852
            Ok(None) => {
853
                debug!(
×
854
                    target: LOG_TARGET,
×
855
                    "Peer `{source_peer}` failed to return the block that was requested."
×
856
                );
857
                Err(CommsInterfaceError::InvalidPeerResponse(format!(
×
858
                    "Invalid response from peer `{source_peer}`: Peer failed to provide the block that was propagated"
×
859
                )))
×
860
            },
861
            Err(CommsInterfaceError::UnexpectedApiResponse) => {
862
                debug!(
×
863
                    target: LOG_TARGET,
×
864
                    "Peer `{source_peer}` sent unexpected API response."
×
865
                );
866
                Err(CommsInterfaceError::UnexpectedApiResponse)
×
867
            },
868
            Err(e) => Err(e),
×
869
        }
870
    }
×
871

872
    /// Handle inbound blocks from remote nodes and local services.
    ///
    /// Hydrates the block (fills in compact inputs from the local db), attempts to add it to
    /// the blockchain, publishes the matching [`BlockEvent`], and — for newly added blocks or
    /// reorgs — propagates the block to the network.
    ///
    /// ## Arguments
    /// block - the block to store
    /// source_peer - the peer that sent this new block message, or None if the block was generated by a local miner
    ///
    /// Returns the block hash on success.
    pub async fn handle_block(
        &mut self,
        block: Block,
        source_peer: Option<NodeId>,
    ) -> Result<BlockHash, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;

        info!(
            target: LOG_TARGET,
            "Block #{} ({}) received from {}",
            block_height,
            block_hash.to_hex(),
            source_peer
                .as_ref()
                .map(|p| format!("remote peer: {p}"))
                .unwrap_or_else(|| "local services".to_string())
        );
        debug!(target: LOG_TARGET, "Incoming block: {block}");
        let timer = Instant::now();
        // Fill in any compact inputs with full output data from the local db.
        let block = self.hydrate_block(block).await?;

        let add_block_result = self.blockchain_db.add_block(block.clone()).await;
        // Create block event on block event stream
        match add_block_result {
            Ok(block_add_result) => {
                debug!(
                    target: LOG_TARGET,
                    "Block #{} ({}) added ({}) to blockchain in {:.2?}",
                    block_height,
                    block_hash.to_hex(),
                    block_add_result,
                    timer.elapsed()
                );

                // Only propagate blocks that changed the best chain; duplicates and orphans
                // are not re-broadcast.
                let should_propagate = match &block_add_result {
                    BlockAddResult::Ok(_) => true,
                    BlockAddResult::BlockExists => false,
                    BlockAddResult::OrphanBlock => false,
                    BlockAddResult::ChainReorg { .. } => true,
                };

                #[cfg(feature = "metrics")]
                self.update_block_result_metrics(&block_add_result).await?;

                self.publish_block_event(BlockEvent::ValidBlockAdded(block.clone(), block_add_result));

                if should_propagate {
                    debug!(
                        target: LOG_TARGET,
                        "Propagate block ({}) to network.",
                        block_hash.to_hex()
                    );
                    // Don't send the block back to the peer it came from.
                    let exclude_peers = source_peer.into_iter().collect();
                    let new_block_msg = NewBlock::from(&*block);
                    // Propagation failure is logged but does not fail the handling of the block.
                    if let Err(e) = self.outbound_nci.propagate_block(new_block_msg, exclude_peers).await {
                        warn!(
                            target: LOG_TARGET,
                            "Failed to propagate block ({}) to network: {}.",
                            block_hash.to_hex(), e
                        );
                    }
                }
                Ok(block_hash)
            },

            // Validation failures are attributed to the sender and published as a distinct event.
            Err(e @ ChainStorageError::ValidationError { .. }) => {
                #[cfg(feature = "metrics")]
                {
                    let block_hash = block.hash();
                    metrics::rejected_blocks(block.header.height, &block_hash).inc();
                }
                warn!(
                    target: LOG_TARGET,
                    "Peer {} sent an invalid block: {}",
                    source_peer
                        .as_ref()
                        .map(ToString::to_string)
                        .unwrap_or_else(|| "<local request>".to_string()),
                    e
                );
                self.publish_block_event(BlockEvent::AddBlockValidationFailed { block, source_peer });
                Err(e.into())
            },

            // Any other storage error: count the rejection and publish a generic error event.
            Err(e) => {
                #[cfg(feature = "metrics")]
                metrics::rejected_blocks(block.header.height, &block.hash()).inc();

                self.publish_block_event(BlockEvent::AddBlockErrored { block });
                Err(e.into())
            },
        }
    }
972

973
    /// Fill in ("hydrate") any compact inputs of `block` with the full output data they spend,
    /// looked up in the local blockchain db.
    ///
    /// Blocks with no inputs are returned unchanged. Returns
    /// `CommsInterfaceError::InvalidFullBlock` if a compact input references an output that is
    /// not present in the db.
    async fn hydrate_block(&mut self, block: Block) -> Result<Arc<Block>, CommsInterfaceError> {
        let block_hash = block.hash();
        let block_height = block.header.height;
        if block.body.inputs().is_empty() {
            debug!(
                target: LOG_TARGET,
                "Block #{} ({}) contains no inputs so nothing to hydrate",
                block_height,
                block_hash.to_hex(),
            );
            return Ok(Arc::new(block));
        }

        let timer = Instant::now();
        // Take the block apart so the inputs can be mutated in place.
        let (header, mut inputs, outputs, kernels) = block.dissolve();

        // Hold one read-access handle for all the output lookups below.
        let db = self.blockchain_db.inner().db_read_access()?;
        for input in &mut inputs {
            // Inputs that already carry full output data need no lookup.
            if !input.is_compact() {
                continue;
            }

            let output_mined_info =
                db.fetch_output(&input.output_hash())?
                    .ok_or_else(|| CommsInterfaceError::InvalidFullBlock {
                        hash: block_hash,
                        details: format!("Output {} to be spent does not exist in db", input.output_hash()),
                    })?;

            input.add_output_data(output_mined_info.output);
        }
        debug!(
            target: LOG_TARGET,
            "Hydrated block #{} ({}) with {} input(s) in {:.2?}",
            block_height,
            block_hash.to_hex(),
            inputs.len(),
            timer.elapsed()
        );
        // Reassemble the block from the (now fully hydrated) parts.
        let block = Block::new(header, AggregateBody::new(inputs, outputs, kernels));
        Ok(Arc::new(block))
    }
1015

1016
    fn publish_block_event(&self, event: BlockEvent) {
×
1017
        if let Err(event) = self.block_event_sender.send(Arc::new(event)) {
×
1018
            debug!(target: LOG_TARGET, "No event subscribers. Event {} dropped.", event.0)
×
1019
        }
×
1020
    }
×
1021

1022
    /// Update Prometheus-style metrics after a block-add attempt (metrics feature only).
    ///
    /// For successful adds and reorgs this refreshes the per-algorithm target-difficulty
    /// gauges, tip height, UTXO set size and difficulty indicators; orphans bump the
    /// orphaned-block counter; other results are ignored.
    #[cfg(feature = "metrics")]
    async fn update_block_result_metrics(&self, block_add_result: &BlockAddResult) -> Result<(), CommsInterfaceError> {
        // Set the target-difficulty gauge matching the block's PoW algorithm.
        // Values are clamped to i64::MAX since the gauges are i64-valued.
        fn update_target_difficulty(block: &ChainBlock) {
            match block.header().pow_algo() {
                PowAlgorithm::Sha3x => {
                    metrics::target_difficulty_sha()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomXM => {
                    metrics::target_difficulty_monero_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::RandomXT => {
                    metrics::target_difficulty_tari_randomx()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
                PowAlgorithm::Cuckaroo => {
                    metrics::target_difficulty_cuckaroo()
                        .set(i64::try_from(block.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
                },
            }
        }

        match block_add_result {
            BlockAddResult::Ok(ref block) => {
                update_target_difficulty(block);
                self.update_difficulty_indicators(block.height()).await?;
                #[allow(clippy::cast_possible_wrap)]
                metrics::tip_height().set(block.height() as i64);
                let utxo_set_size = self.blockchain_db.utxo_count().await?;
                metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
            },
            BlockAddResult::ChainReorg { added, removed } => {
                // `added.last()` is used as the new fork tip height after the reorg.
                if let Some(fork_height) = added.last().map(|b| b.height()) {
                    #[allow(clippy::cast_possible_wrap)]
                    metrics::tip_height().set(fork_height as i64);
                    metrics::reorg(fork_height, added.len(), removed.len()).inc();

                    let utxo_set_size = self.blockchain_db.utxo_count().await?;
                    metrics::utxo_set_size().set(utxo_set_size.try_into().unwrap_or(i64::MAX));
                }
                // Refresh difficulty metrics for every block on the new branch.
                for block in added {
                    update_target_difficulty(block);
                    self.update_difficulty_indicators(block.height()).await?;
                }
            },
            BlockAddResult::OrphanBlock => {
                metrics::orphaned_blocks().inc();
            },
            // BlockExists and any other results need no metric updates.
            _ => {},
        }
        Ok(())
    }
1075

1076
    /// Publish difficulty-indicator metrics for the header `DIFF_INDICATOR_LAG` blocks behind
    /// `tip` (metrics feature only).
    ///
    /// The lag keeps the sampled header on a part of the chain where reorgs are highly
    /// unlikely. If the chain is not yet long enough, all indicator gauges are zeroed.
    #[cfg(feature = "metrics")]
    async fn update_difficulty_indicators(&self, tip: u64) -> Result<(), CommsInterfaceError> {
        // Use canonical height from tip where reorgs are highly unlikely
        if tip <= DIFF_INDICATOR_LAG {
            // Not enough history yet; clear or skip
            metrics::accumulated_difficulty_indicator().set(0);
            metrics::target_difficulty_indicator().set(0);
            metrics::difficulty_indicator_height().set(0);
            metrics::target_difficulty().set(0);
            metrics::accumulated_difficulty_exp2().set(0);
            metrics::accumulated_difficulty_sig53().set(0);
            metrics::accumulated_difficulty_as_f64().set(0.0);
            return Ok(());
        }
        let height = tip - DIFF_INDICATOR_LAG;
        let chain_header = self.blockchain_db.fetch_chain_header(height).await?;

        // Compute indicators in millibits as `logâ‚‚(value) * 1000` to make huge numbers fathomable in a time-series
        // graph with enough granularity
        let acc_diff_milli_bits = metrics::log2_u512(&chain_header.accumulated_data().total_accumulated_difficulty)
            .map(metrics::milli_bits)
            .unwrap_or(0);
        let target_diff_milli_bits =
            metrics::log2_u128(u128::from(chain_header.accumulated_data().target_difficulty.as_u64()))
                .map(metrics::milli_bits)
                .unwrap_or(0);
        // Alternative encodings of the (very large) accumulated difficulty: an (exponent,
        // 53-bit-significand) pair and a plain f64 approximation. Both default to zero on failure.
        let (acc_diff_exp2, acc_diff_sig53) =
            metrics::u512_exp2_sig53(&chain_header.accumulated_data().total_accumulated_difficulty).unwrap_or((0, 0));
        let acc_diff_as_f64 =
            metrics::approximate_u512_with_f64(&chain_header.accumulated_data().total_accumulated_difficulty)
                .unwrap_or(0.0);

        // Publish
        metrics::accumulated_difficulty_indicator().set(acc_diff_milli_bits);
        metrics::target_difficulty_indicator().set(target_diff_milli_bits);
        #[allow(clippy::cast_possible_wrap)]
        metrics::difficulty_indicator_height().set(height as i64);
        #[allow(clippy::cast_possible_wrap)]
        metrics::target_difficulty()
            .set(i64::try_from(chain_header.accumulated_data().target_difficulty.as_u64()).unwrap_or(i64::MAX));
        metrics::accumulated_difficulty_exp2().set(acc_diff_exp2);
        metrics::accumulated_difficulty_sig53().set(acc_diff_sig53);
        metrics::accumulated_difficulty_as_f64().set(acc_diff_as_f64);

        Ok(())
    }
×
1122

1123
    async fn get_target_difficulty_for_next_block(
×
1124
        &self,
×
1125
        pow_algo: PowAlgorithm,
×
1126
        constants: &ConsensusConstants,
×
1127
        current_block_hash: HashOutput,
×
1128
    ) -> Result<Difficulty, CommsInterfaceError> {
×
1129
        let target_difficulty = self
×
1130
            .blockchain_db
×
1131
            .fetch_target_difficulty_for_next_block(pow_algo, current_block_hash)
×
1132
            .await?;
×
1133

1134
        let target = target_difficulty.calculate(
×
1135
            constants.min_pow_difficulty(pow_algo),
×
1136
            constants.max_pow_difficulty(pow_algo),
×
1137
        );
×
1138
        trace!(target: LOG_TARGET, "Target difficulty {target} for PoW {pow_algo}");
×
1139
        Ok(target)
×
1140
    }
×
1141

1142
    /// Return the last block hash the mempool has seen, converting any mempool error into a
    /// `CommsInterfaceError`.
    pub async fn get_last_seen_hash(&self) -> Result<FixedHash, CommsInterfaceError> {
        match self.mempool.get_last_seen_hash().await {
            Ok(hash) => Ok(hash),
            Err(e) => Err(e.into()),
        }
    }
1145
}
1146

1147
impl<B> Clone for InboundNodeCommsHandlers<B> {
1148
    fn clone(&self) -> Self {
×
1149
        Self {
×
1150
            block_event_sender: self.block_event_sender.clone(),
×
1151
            blockchain_db: self.blockchain_db.clone(),
×
1152
            mempool: self.mempool.clone(),
×
1153
            consensus_manager: self.consensus_manager.clone(),
×
1154
            list_of_reconciling_blocks: self.list_of_reconciling_blocks.clone(),
×
1155
            outbound_nci: self.outbound_nci.clone(),
×
1156
            connectivity: self.connectivity.clone(),
×
1157
            randomx_factory: self.randomx_factory.clone(),
×
1158
        }
×
1159
    }
×
1160
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc