tari-project / tari / build 15120110241

19 May 2025 06:08PM UTC coverage: 73.213% (-0.06%) from 73.269%
push | github | web-flow

feat!: add second tari only randomx mining (#7057)

Description
---

Adds a second RandomX mining algorithm option that mines Tari only.

<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

- **New Features**
  - Introduced distinct support for Monero RandomX and Tari RandomX proof-of-work algorithms with separate difficulty tracking, hash rate reporting, and block template caching (sketched below).
  - Added a new VM key field in block results to enhance mining and validation processes.
  - Extended miner configuration and mining logic to support multiple proof-of-work algorithms, including Tari RandomX.

- **Bug Fixes**
  - Improved difficulty and hash rate accuracy by separating Monero and Tari RandomX calculations and metrics.

- **Refactor**
  - Renamed and split data structures, enums, protobuf messages, and methods to differentiate between Monero and Tari RandomX.
  - Updated consensus, validation, and chain strength comparison to handle multiple RandomX variants.
  - Migrated accumulated difficulty representations from 256-bit to 512-bit integers for enhanced precision (sketched below).
  - Generalized difficulty window handling to support multiple proof-of-work algorithms dynamically.

- **Documentation**
  - Clarified comments and field descriptions to reflect the distinction between Monero and Tari RandomX algorithms.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
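
The release notes above describe splitting RandomX into a Monero merge-mined variant and a Tari-only variant, each with its own difficulty tracking. The following is a minimal illustrative sketch of that idea; the enum, struct, and field names are hypothetical and are not the actual tari-core definitions.

```rust
/// Hypothetical proof-of-work identifier with RandomX split into a
/// Monero merge-mined variant and a Tari-only variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum PowAlgo {
    MoneroRandomX,
    TariRandomX,
    Sha3x,
}

/// Hypothetical per-algorithm accumulated difficulty, kept separate so that
/// difficulty, hash rate, and block templates can be reported per algorithm.
#[derive(Debug, Clone, Copy, Default)]
struct AccumulatedDifficulty {
    monero_randomx: u128,
    tari_randomx: u128,
    sha3x: u128,
}

impl AccumulatedDifficulty {
    /// Add a block's achieved difficulty to the counter for its algorithm.
    fn add(&mut self, algo: PowAlgo, difficulty: u64) {
        match algo {
            PowAlgo::MoneroRandomX => self.monero_randomx += u128::from(difficulty),
            PowAlgo::TariRandomX => self.tari_randomx += u128::from(difficulty),
            PowAlgo::Sha3x => self.sha3x += u128::from(difficulty),
        }
    }
}

fn main() {
    // Blocks mined with different algorithms accumulate into separate counters.
    let mut acc = AccumulatedDifficulty::default();
    acc.add(PowAlgo::TariRandomX, 1_000);
    acc.add(PowAlgo::MoneroRandomX, 2_000);
    println!("{:?}", acc);
}
```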
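
The refactor notes also mention widening accumulated difficulty from 256-bit to 512-bit integers and updating chain strength comparison across RandomX variants. One plausible motivation, stated here as an assumption rather than the project's actual consensus rule, is that a total formed by multiplying per-algorithm accumulated difficulties needs more headroom once a third algorithm is added. The sketch below uses the `primitive-types` crate's `U512` and hypothetical names.

```rust
use primitive_types::U512;

/// Hypothetical per-algorithm accumulated difficulty counters.
struct AccumulatedDifficulties {
    monero_randomx: u64,
    tari_randomx: u64,
    sha3x: u64,
}

impl AccumulatedDifficulties {
    /// Combine the per-algorithm counters into one chain-strength value.
    /// A product of three 64-bit counters needs up to 192 bits, and with
    /// wider per-algorithm counters the product can exceed 256 bits, which
    /// is one reason to hold the total in a 512-bit integer.
    fn total(&self) -> U512 {
        U512::from(self.monero_randomx) * U512::from(self.tari_randomx) * U512::from(self.sha3x)
    }
}

/// Chain strength comparison on the combined 512-bit totals.
fn is_stronger(a: &AccumulatedDifficulties, b: &AccumulatedDifficulties) -> bool {
    a.total() > b.total()
}

fn main() {
    let ours = AccumulatedDifficulties { monero_randomx: 3_000, tari_randomx: 2_000, sha3x: 1_000 };
    let theirs = AccumulatedDifficulties { monero_randomx: 2_500, tari_randomx: 2_500, sha3x: 1_000 };
    println!("ours stronger: {}", is_stronger(&ours, &theirs));
}
```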

170 of 371 new or added lines in 26 files covered. (45.82%)

40 existing lines in 12 files now uncovered.

82064 of 112089 relevant lines covered (73.21%)

274996.0 hits per line

Source file: /base_layer/core/src/base_node/sync/block_sync/synchronizer.rs (file coverage: 63.47%)
//  Copyright 2020, The Tari Project
//
//  Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
//  following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
//  disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
//  following disclaimer in the documentation and/or other materials provided with the distribution.
//
//  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
//  products derived from this software without specific prior written permission.
//
//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
//  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
//  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
//  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
//  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
//  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use std::{
    convert::{TryFrom, TryInto},
    sync::Arc,
    time::{Duration, Instant},
};

use futures::StreamExt;
use log::*;
use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId, protocol::rpc::RpcClient, PeerConnection};
use tari_utilities::hex::Hex;

use super::error::BlockSyncError;
use crate::{
    base_node::{
        sync::{ban::PeerBanManager, hooks::Hooks, rpc, SyncPeer},
        BlockchainSyncConfig,
    },
    blocks::{Block, ChainBlock},
    chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend},
    common::{rolling_avg::RollingAverageTime, BanPeriod},
    proto::base_node::SyncBlocksRequest,
    transactions::aggregated_body::AggregateBody,
    validation::{BlockBodyValidator, ValidationError},
};

const LOG_TARGET: &str = "c::bn::block_sync";

const MAX_LATENCY_INCREASES: usize = 5;

pub struct BlockSynchronizer<'a, B> {
    config: BlockchainSyncConfig,
    db: AsyncBlockchainDb<B>,
    connectivity: ConnectivityRequester,
    sync_peers: &'a mut Vec<SyncPeer>,
    block_validator: Arc<dyn BlockBodyValidator<B>>,
    hooks: Hooks,
    peer_ban_manager: PeerBanManager,
}

impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> {
    pub fn new(
        config: BlockchainSyncConfig,
        db: AsyncBlockchainDb<B>,
        connectivity: ConnectivityRequester,
        sync_peers: &'a mut Vec<SyncPeer>,
        block_validator: Arc<dyn BlockBodyValidator<B>>,
    ) -> Self {
        let peer_ban_manager = PeerBanManager::new(config.clone(), connectivity.clone());
        Self {
            config,
            db,
            connectivity,
            sync_peers,
            block_validator,
            hooks: Default::default(),
            peer_ban_manager,
        }
    }

    pub fn on_starting<H>(&mut self, hook: H)
    where for<'r> H: FnOnce(&SyncPeer) + Send + Sync + 'static {
        self.hooks.add_on_starting_hook(hook);
    }

    pub fn on_progress<H>(&mut self, hook: H)
    where H: Fn(Arc<ChainBlock>, u64, &SyncPeer) + Send + Sync + 'static {
        self.hooks.add_on_progress_block_hook(hook);
    }

    pub fn on_complete<H>(&mut self, hook: H)
    where H: Fn(Arc<ChainBlock>, u64) + Send + Sync + 'static {
        self.hooks.add_on_complete_hook(hook);
    }

    pub async fn synchronize(&mut self) -> Result<(), BlockSyncError> {
        let mut max_latency = self.config.initial_max_sync_latency;
        let mut sync_round = 0;
        let mut latency_increases_counter = 0;
        loop {
            match self.attempt_block_sync(max_latency).await {
                Ok(_) => return Ok(()),
                Err(err @ BlockSyncError::AllSyncPeersExceedLatency) => {
                    warn!(target: LOG_TARGET, "{}", err);
                    max_latency += self.config.max_latency_increase;
                    warn!(
                        target: LOG_TARGET,
                        "Retrying block sync with increased max latency {:.2?} with {} sync peers",
                        max_latency,
                        self.sync_peers.len()
                    );
                    latency_increases_counter += 1;
                    if latency_increases_counter > MAX_LATENCY_INCREASES {
                        return Err(err);
                    }
                    // Prohibit using a few slow sync peers only, rather get new sync peers assigned
                    if self.sync_peers.len() < 2 {
                        return Err(err);
                    } else {
                        continue;
                    }
                },
                Err(err @ BlockSyncError::SyncRoundFailed) => {
                    sync_round += 1;
                    warn!(target: LOG_TARGET, "{} ({})", err, sync_round);
                    continue;
                },
                Err(err) => {
                    return Err(err);
                },
            }
        }
    }

    async fn attempt_block_sync(&mut self, max_latency: Duration) -> Result<(), BlockSyncError> {
        let sync_peer_node_ids = self.sync_peers.iter().map(|p| p.node_id()).cloned().collect::<Vec<_>>();
        info!(
            target: LOG_TARGET,
            "Attempting to sync blocks({} sync peers)",
            sync_peer_node_ids.len()
        );
        let mut latency_counter = 0usize;
        for node_id in sync_peer_node_ids {
            let peer_index = self.get_sync_peer_index(&node_id).ok_or(BlockSyncError::PeerNotFound)?;
            let sync_peer = &self.sync_peers[peer_index];
            self.hooks.call_on_starting_hook(sync_peer);
            let mut conn = match self.connect_to_sync_peer(node_id.clone()).await {
                Ok(val) => val,
                Err(e) => {
                    warn!(
                        target: LOG_TARGET,
                        "Failed to connect to sync peer `{}`: {}", node_id, e
                    );
                    self.remove_sync_peer(&node_id);
                    continue;
                },
            };
            let config = RpcClient::builder()
                .with_deadline(self.config.rpc_deadline)
                .with_deadline_grace_period(Duration::from_secs(5));
            let mut client = match conn
                .connect_rpc_using_builder::<rpc::BaseNodeSyncRpcClient>(config)
                .await
            {
                Ok(val) => val,
                Err(e) => {
                    warn!(
                        target: LOG_TARGET,
                        "Failed to obtain RPC connection from sync peer `{}`: {}", node_id, e
                    );
                    self.remove_sync_peer(&node_id);
                    continue;
                },
            };
            let latency = client
                .get_last_request_latency()
                .expect("unreachable panic: last request latency must be set after connect");
            self.sync_peers[peer_index].set_latency(latency);
            let sync_peer = self.sync_peers[peer_index].clone();
            info!(
                target: LOG_TARGET,
                "Attempting to synchronize blocks with `{}` latency: {:.2?}", node_id, latency
            );
            match self.synchronize_blocks(sync_peer, client, max_latency).await {
                Ok(_) => return Ok(()),
                Err(err) => {
                    warn!(target: LOG_TARGET, "{}", err);
                    let ban_reason = BlockSyncError::get_ban_reason(&err);
                    if let Some(reason) = ban_reason {
                        let duration = match reason.ban_duration {
                            BanPeriod::Short => self.config.short_ban_period,
                            BanPeriod::Long => self.config.ban_period,
                        };
                        self.peer_ban_manager
                            .ban_peer_if_required(&node_id, reason.reason, duration)
                            .await;
                    }
                    if let BlockSyncError::MaxLatencyExceeded { .. } = err {
                        latency_counter += 1;
                    } else {
                        self.remove_sync_peer(&node_id);
                    }
                },
            }
        }

        if self.sync_peers.is_empty() {
            Err(BlockSyncError::NoMoreSyncPeers("Block sync failed".to_string()))
        } else if latency_counter >= self.sync_peers.len() {
            Err(BlockSyncError::AllSyncPeersExceedLatency)
        } else {
            Err(BlockSyncError::SyncRoundFailed)
        }
    }

    async fn connect_to_sync_peer(&self, peer: NodeId) -> Result<PeerConnection, BlockSyncError> {
        let connection = self.connectivity.dial_peer(peer).await?;
        Ok(connection)
    }

    #[allow(clippy::too_many_lines)]
    async fn synchronize_blocks(
        &mut self,
        mut sync_peer: SyncPeer,
        mut client: rpc::BaseNodeSyncRpcClient,
        max_latency: Duration,
    ) -> Result<(), BlockSyncError> {
        info!(target: LOG_TARGET, "Starting block sync from peer {}", sync_peer.node_id());

        let tip_header = self.db.fetch_last_header().await?;
        let local_metadata = self.db.get_chain_metadata().await?;

        if tip_header.height <= local_metadata.best_block_height() {
            debug!(
                target: LOG_TARGET,
                "Blocks already synchronized to height {}.", tip_header.height
            );
            return Ok(());
        }

        let tip_hash = tip_header.hash();
        let tip_height = tip_header.height;
        let best_height = local_metadata.best_block_height();
        let chain_header = self.db.fetch_chain_header(best_height).await?;

        let best_full_block_hash = chain_header.accumulated_data().hash;
        debug!(
            target: LOG_TARGET,
            "Starting block sync from peer `{}`. Current best block is #{} `{}`. Syncing to #{} ({}).",
            sync_peer,
            best_height,
            best_full_block_hash.to_hex(),
            tip_height,
            tip_hash.to_hex()
        );
        let request = SyncBlocksRequest {
            start_hash: best_full_block_hash.to_vec(),
            // To the tip!
            end_hash: tip_hash.to_vec(),
        };

        let mut block_stream = client.sync_blocks(request).await?;
        let mut prev_hash = best_full_block_hash;
        let mut current_block = None;
        let mut last_sync_timer = Instant::now();
        let mut avg_latency = RollingAverageTime::new(20);
        while let Some(block_result) = block_stream.next().await {
            let latency = last_sync_timer.elapsed();
            avg_latency.add_sample(latency);
            let block_body_response = block_result?;

            let header = self
                .db
                .fetch_chain_header_by_block_hash(block_body_response.hash.clone().try_into()?)
                .await?
                .ok_or_else(|| {
                    BlockSyncError::UnknownHeaderHash(format!(
                        "Peer sent hash ({}) for block header we do not have",
                        block_body_response.hash.to_hex()
                    ))
                })?;

            let current_height = header.height();
            let header_hash = *header.hash();
            let timestamp = header.timestamp();

            if header.header().prev_hash != prev_hash {
                return Err(BlockSyncError::BlockWithoutParent {
                    expected: prev_hash.to_hex(),
                    got: header.header().prev_hash.to_hex(),
                });
            }

            prev_hash = header_hash;

            let body = block_body_response
                .body
                .map(AggregateBody::try_from)
                .ok_or_else(|| BlockSyncError::InvalidBlockBody("Peer sent empty block".to_string()))?
                .map_err(BlockSyncError::InvalidBlockBody)?;

            debug!(
                target: LOG_TARGET,
                "Validating block body #{} (PoW = {}, {}, latency: {:.2?})",
                current_height,
                header.header().pow_algo(),
                body.to_counts_string(),
                latency
            );

            let timer = Instant::now();
            let (header, header_accum_data) = header.into_parts();
            let block = Block::new(header, body);

            // Validate the block inside a tokio task
            let task_block = block.clone();
            let db = self.db.inner().clone();
            let validator = self.block_validator.clone();
            let res = {
                let txn = db.db_read_access()?;
                validator.validate_body(&*txn, &task_block)
            };

            let block = match res {
                Ok(block) => block,
                Err(err @ ValidationError::BadBlockFound { .. }) | Err(err @ ValidationError::FatalStorageError(_)) => {
                    return Err(err.into());
                },
                Err(err) => {
                    // Add to bad blocks
                    if let Err(err) = self
                        .db
                        .write_transaction()
                        .delete_orphan(header_hash)
                        .insert_bad_block(header_hash, current_height, err.to_string())
                        .commit()
                        .await
                    {
                        error!(target: LOG_TARGET, "Failed to insert bad block: {}", err);
                    }
                    return Err(err.into());
                },
            };

            let block = ChainBlock::try_construct(Arc::new(block), header_accum_data)
                .map(Arc::new)
                .ok_or(BlockSyncError::FailedToConstructChainBlock)?;

            debug!(
                target: LOG_TARGET,
                "Validated in {:.0?}. Storing block body #{} (PoW = {}, {})",
                timer.elapsed(),
                block.header().height,
                block.header().pow_algo(),
                block.block().body.to_counts_string(),
            );
            trace!(
                target: LOG_TARGET,
                "{}", block
            );

            let timer = Instant::now();
            self.db
                .write_transaction()
                .delete_orphan(header_hash)
                .insert_tip_block_body(block.clone())
                .set_best_block(
                    block.height(),
                    header_hash,
                    block.accumulated_data().total_accumulated_difficulty,
                    block.header().prev_hash,
                    timestamp,
                )
                .commit()
                .await?;

            // Average time between receiving blocks from the peer - used to detect a slow sync peer
            let last_avg_latency = avg_latency.calculate_average_with_min_samples(5);
            if let Some(latency) = last_avg_latency {
                sync_peer.set_latency(latency);
            }
            // Includes time to add block to database, used to show blocks/s on status line
            sync_peer.add_sample(last_sync_timer.elapsed());
            self.hooks
                .call_on_progress_block_hooks(block.clone(), tip_height, &sync_peer);

            debug!(
                target: LOG_TARGET,
                "Block body #{} added in {:.0?}, Tot_acc_diff {}, MoneroRX {}, TariRx {}, SHA3 {}, latency: {:.2?}",
                block.height(),
                timer.elapsed(),
                block
                    .accumulated_data()
                    .total_accumulated_difficulty,
                block.accumulated_data().accumulated_monero_randomx_difficulty,
                block.accumulated_data().accumulated_tari_randomx_difficulty,
                block.accumulated_data().accumulated_sha3x_difficulty,
                latency
            );
            if let Some(avg_latency) = last_avg_latency {
                if avg_latency > max_latency {
                    return Err(BlockSyncError::MaxLatencyExceeded {
                        peer: sync_peer.node_id().clone(),
                        latency: avg_latency,
                        max_latency,
                    });
                }
            }

            current_block = Some(block);
            last_sync_timer = Instant::now();
        }
        debug!(
            "Sync peer claim at start  - height: {}, accumulated difficulty: {}",
            sync_peer.claimed_chain_metadata().best_block_height(),
            sync_peer.claimed_chain_metadata().accumulated_difficulty(),
        );
        debug!(
            "Our best header at start  - height: {}, accumulated difficulty: {}",
            best_height,
            chain_header.accumulated_data().total_accumulated_difficulty,
        );
        let metadata_after_sync = self.db.get_chain_metadata().await?;
        debug!(
            "Our best block after sync - height: {}, accumulated difficulty: {}",
            metadata_after_sync.best_block_height(),
            metadata_after_sync.accumulated_difficulty(),
        );

        if metadata_after_sync.accumulated_difficulty() < sync_peer.claimed_chain_metadata().accumulated_difficulty() {
            return Err(BlockSyncError::PeerDidNotSupplyAllClaimedBlocks(format!(
                "Their claim - height: {}, accumulated difficulty: {}. Our status after block sync - height: {}, \
                 accumulated difficulty: {}",
                sync_peer.claimed_chain_metadata().best_block_height(),
                sync_peer.claimed_chain_metadata().accumulated_difficulty(),
                metadata_after_sync.best_block_height(),
                metadata_after_sync.accumulated_difficulty(),
            )));
        }

        if let Some(block) = current_block {
            self.hooks.call_on_complete_hooks(block, best_height);
        }

        debug!(target: LOG_TARGET, "Completed block sync with peer `{}`", sync_peer);

        Ok(())
    }

    // Sync peers are also removed from the list of sync peers if the ban duration is longer than the short ban period.
    fn remove_sync_peer(&mut self, node_id: &NodeId) {
        if let Some(pos) = self.sync_peers.iter().position(|p| p.node_id() == node_id) {
            self.sync_peers.remove(pos);
        }
    }

    // Helper function to get the index to the node_id inside of the vec of peers
    fn get_sync_peer_index(&mut self, node_id: &NodeId) -> Option<usize> {
        self.sync_peers.iter().position(|p| p.node_id() == node_id)
    }
}