tari-project / tari, build 17033178607 (push via github)

18 Aug 2025 06:45AM UTC coverage: 54.49% (-0.007%) from 54.497%

stringhandler: Merge branch 'development' of github.com:tari-project/tari into odev

971 of 2923 new or added lines in 369 files covered (33.22%).

5804 existing lines in 173 files now uncovered.

76688 of 140739 relevant lines covered (54.49%).

193850.18 hits per line.

Source file (0.0% covered): /base_layer/core/src/base_node/sync/block_sync/synchronizer.rs

//  Copyright 2020, The Tari Project
//
//  Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
//  following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
//  disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
//  following disclaimer in the documentation and/or other materials provided with the distribution.
//
//  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
//  products derived from this software without specific prior written permission.
//
//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
//  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
//  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
//  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
//  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
//  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use std::{
    convert::{TryFrom, TryInto},
    sync::Arc,
    time::{Duration, Instant},
};

use futures::StreamExt;
use log::*;
use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId, protocol::rpc::RpcClient, PeerConnection};
use tari_utilities::hex::Hex;

use super::error::BlockSyncError;
use crate::{
    base_node::{
        sync::{ban::PeerBanManager, hooks::Hooks, rpc, SyncPeer},
        BlockchainSyncConfig,
    },
    blocks::{Block, ChainBlock},
    chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend},
    common::{rolling_avg::RollingAverageTime, BanPeriod},
    proto::base_node::SyncBlocksRequest,
    transactions::aggregated_body::AggregateBody,
    validation::{BlockBodyValidator, ValidationError},
};

const LOG_TARGET: &str = "c::bn::block_sync";

const MAX_LATENCY_INCREASES: usize = 5;

pub struct BlockSynchronizer<'a, B> {
    config: BlockchainSyncConfig,
    db: AsyncBlockchainDb<B>,
    connectivity: ConnectivityRequester,
    sync_peers: &'a mut Vec<SyncPeer>,
    block_validator: Arc<dyn BlockBodyValidator<B>>,
    hooks: Hooks,
    peer_ban_manager: PeerBanManager,
}

impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> {
    pub fn new(
        config: BlockchainSyncConfig,
        db: AsyncBlockchainDb<B>,
        connectivity: ConnectivityRequester,
        sync_peers: &'a mut Vec<SyncPeer>,
        block_validator: Arc<dyn BlockBodyValidator<B>>,
    ) -> Self {
        let peer_ban_manager = PeerBanManager::new(config.clone(), connectivity.clone());
        Self {
            config,
            db,
            connectivity,
            sync_peers,
            block_validator,
            hooks: Default::default(),
            peer_ban_manager,
        }
    }

    pub fn on_starting<H>(&mut self, hook: H)
    where for<'r> H: FnOnce(&SyncPeer) + Send + Sync + 'static {
        self.hooks.add_on_starting_hook(hook);
    }

    pub fn on_progress<H>(&mut self, hook: H)
    where H: Fn(Arc<ChainBlock>, u64, &SyncPeer) + Send + Sync + 'static {
        self.hooks.add_on_progress_block_hook(hook);
    }

    pub fn on_complete<H>(&mut self, hook: H)
    where H: Fn(Arc<ChainBlock>, u64) + Send + Sync + 'static {
        self.hooks.add_on_complete_hook(hook);
    }

    pub async fn synchronize(&mut self) -> Result<(), BlockSyncError> {
        let mut max_latency = self.config.initial_max_sync_latency;
        let mut sync_round = 0;
        let mut latency_increases_counter = 0;
        loop {
            match self.attempt_block_sync(max_latency).await {
                Ok(_) => return Ok(()),
                Err(err @ BlockSyncError::AllSyncPeersExceedLatency) => {
                    warn!(target: LOG_TARGET, "{err}");
                    max_latency += self.config.max_latency_increase;
                    warn!(
                        target: LOG_TARGET,
                        "Retrying block sync with increased max latency {:.2?} with {} sync peers",
                        max_latency,
                        self.sync_peers.len()
                    );
                    latency_increases_counter += 1;
                    if latency_increases_counter > MAX_LATENCY_INCREASES {
                        return Err(err);
                    }
                    // Prohibit using a few slow sync peers only, rather get new sync peers assigned
                    if self.sync_peers.len() < 2 {
                        return Err(err);
                    } else {
                        continue;
                    }
                },
                Err(err @ BlockSyncError::SyncRoundFailed) => {
                    sync_round += 1;
                    warn!(target: LOG_TARGET, "{err} ({sync_round})");
                    continue;
                },
                Err(err) => {
                    return Err(err);
                },
            }
        }
    }

    async fn attempt_block_sync(&mut self, max_latency: Duration) -> Result<(), BlockSyncError> {
        let sync_peer_node_ids = self.sync_peers.iter().map(|p| p.node_id()).cloned().collect::<Vec<_>>();
        info!(
            target: LOG_TARGET,
            "Attempting to sync blocks({} sync peers)",
            sync_peer_node_ids.len()
        );
        let mut latency_counter = 0usize;
        for node_id in sync_peer_node_ids {
            let peer_index = self.get_sync_peer_index(&node_id).ok_or(BlockSyncError::PeerNotFound)?;
            let sync_peer = self.sync_peers.get(peer_index).expect("Already checked");
            self.hooks.call_on_starting_hook(sync_peer);
            let mut conn = match self.connect_to_sync_peer(node_id.clone()).await {
                Ok(val) => val,
                Err(e) => {
                    warn!(
                        target: LOG_TARGET,
                        "Failed to connect to sync peer `{node_id}`: {e}"
                    );
                    self.remove_sync_peer(&node_id);
                    continue;
                },
            };
            let config = RpcClient::builder()
                .with_deadline(self.config.rpc_deadline)
                .with_deadline_grace_period(Duration::from_secs(5));
            let mut client = match conn
                .connect_rpc_using_builder::<rpc::BaseNodeSyncRpcClient>(config)
                .await
            {
                Ok(val) => val,
                Err(e) => {
                    warn!(
                        target: LOG_TARGET,
                        "Failed to obtain RPC connection from sync peer `{node_id}`: {e}"
                    );
                    self.remove_sync_peer(&node_id);
                    continue;
                },
            };
            let latency = client
                .get_last_request_latency()
                .expect("unreachable panic: last request latency must be set after connect");
            self.sync_peers
                .get_mut(peer_index)
                .expect("Already checked")
                .set_latency(latency);
            let sync_peer = self.sync_peers.get(peer_index).expect("Already checked").clone();
            info!(
                target: LOG_TARGET,
                "Attempting to synchronize blocks with `{node_id}` latency: {latency:.2?}"
            );
            match self.synchronize_blocks(sync_peer, client, max_latency).await {
                Ok(_) => return Ok(()),
                Err(err) => {
                    warn!(target: LOG_TARGET, "{err}");
                    let ban_reason = BlockSyncError::get_ban_reason(&err);
                    if let Some(reason) = ban_reason {
                        let duration = match reason.ban_duration {
                            BanPeriod::Short => self.config.short_ban_period,
                            BanPeriod::Long => self.config.ban_period,
                        };
                        self.peer_ban_manager
                            .ban_peer_if_required(&node_id, reason.reason, duration)
                            .await;
                    }
                    if let BlockSyncError::MaxLatencyExceeded { .. } = err {
                        latency_counter += 1;
                    } else {
                        self.remove_sync_peer(&node_id);
                    }
                },
            }
        }

        if self.sync_peers.is_empty() {
            Err(BlockSyncError::NoMoreSyncPeers("Block sync failed".to_string()))
        } else if latency_counter >= self.sync_peers.len() {
            Err(BlockSyncError::AllSyncPeersExceedLatency)
        } else {
            Err(BlockSyncError::SyncRoundFailed)
        }
    }

    async fn connect_to_sync_peer(&self, peer: NodeId) -> Result<PeerConnection, BlockSyncError> {
        let connection = self.connectivity.dial_peer(peer).await?;
        Ok(connection)
    }

    #[allow(clippy::too_many_lines)]
    async fn synchronize_blocks(
        &mut self,
        mut sync_peer: SyncPeer,
        mut client: rpc::BaseNodeSyncRpcClient,
        max_latency: Duration,
    ) -> Result<(), BlockSyncError> {
        info!(target: LOG_TARGET, "Starting block sync from peer {}", sync_peer.node_id());

        let tip_header = self.db.fetch_last_header().await?;
        let local_metadata = self.db.get_chain_metadata().await?;

        if tip_header.height <= local_metadata.best_block_height() {
            debug!(
                target: LOG_TARGET,
                "Blocks already synchronized to height {}.", tip_header.height
            );
            return Ok(());
        }

        let tip_hash = tip_header.hash();
        let tip_height = tip_header.height;
        let best_height = local_metadata.best_block_height();
        let chain_header = self.db.fetch_chain_header(best_height).await?;

        let best_full_block_hash = chain_header.accumulated_data().hash;
        debug!(
            target: LOG_TARGET,
            "Starting block sync from peer `{}`. Current best block is #{} `{}`. Syncing to #{} ({}).",
            sync_peer,
            best_height,
            best_full_block_hash.to_hex(),
            tip_height,
            tip_hash.to_hex()
        );
        let request = SyncBlocksRequest {
            start_hash: best_full_block_hash.to_vec(),
            // To the tip!
            end_hash: tip_hash.to_vec(),
        };

        let mut block_stream = client.sync_blocks(request).await?;
        let mut prev_hash = best_full_block_hash;
        let mut current_block = None;
        let mut last_sync_timer = Instant::now();
        let mut avg_latency = RollingAverageTime::new(20);
        while let Some(block_result) = block_stream.next().await {
            let latency = last_sync_timer.elapsed();
            avg_latency.add_sample(latency);
            let block_body_response = block_result?;

            let header = self
                .db
                .fetch_chain_header_by_block_hash(block_body_response.hash.clone().try_into()?)
                .await?
                .ok_or_else(|| {
                    BlockSyncError::UnknownHeaderHash(format!(
                        "Peer sent hash ({}) for block header we do not have",
                        block_body_response.hash.to_hex()
                    ))
                })?;

            let current_height = header.height();
            let header_hash = *header.hash();
            let timestamp = header.timestamp();

            if header.header().prev_hash != prev_hash {
                return Err(BlockSyncError::BlockWithoutParent {
                    expected: prev_hash.to_hex(),
                    got: header.header().prev_hash.to_hex(),
                });
            }

            prev_hash = header_hash;

            let body = block_body_response
                .body
                .map(AggregateBody::try_from)
                .ok_or_else(|| BlockSyncError::InvalidBlockBody("Peer sent empty block".to_string()))?
                .map_err(BlockSyncError::InvalidBlockBody)?;

            debug!(
                target: LOG_TARGET,
                "Validating block body #{} (PoW = {}, {}, latency: {:.2?})",
                current_height,
                header.header().pow_algo(),
                body.to_counts_string(),
                latency
            );

            let timer = Instant::now();
            let (header, header_accum_data) = header.into_parts();
            let block = Block::new(header, body);

            // Validate the block inside a tokio task
            let task_block = block.clone();
            let db = self.db.inner().clone();
            let validator = self.block_validator.clone();
            let res = {
                let txn = db.db_read_access()?;
                validator.validate_body(&*txn, &task_block)
            };

            let block = match res {
                Ok(block) => block,
                Err(err @ ValidationError::BadBlockFound { .. }) | Err(err @ ValidationError::FatalStorageError(_)) => {
                    return Err(err.into());
                },
                Err(err) => {
                    // Add to bad blocks
                    if let Err(err) = self
                        .db
                        .write_transaction()
                        .delete_orphan(header_hash)
                        .insert_bad_block(header_hash, current_height, err.to_string())
                        .commit()
                        .await
                    {
                        error!(target: LOG_TARGET, "Failed to insert bad block: {err}");
                    }
                    return Err(err.into());
                },
            };

            let block = ChainBlock::try_construct(Arc::new(block), header_accum_data)
                .map(Arc::new)
                .ok_or(BlockSyncError::FailedToConstructChainBlock)?;

            debug!(
                target: LOG_TARGET,
                "Validated in {:.0?}. Storing block body #{} (PoW = {}, {})",
                timer.elapsed(),
                block.header().height,
                block.header().pow_algo(),
                block.block().body.to_counts_string(),
            );
            trace!(
                target: LOG_TARGET,
                "{block}"
            );

            self.db
                .write_transaction()
                .delete_orphan(header_hash)
                .insert_tip_block_body(block.clone())
                .set_best_block(
                    block.height(),
                    header_hash,
                    block.accumulated_data().total_accumulated_difficulty,
                    block.header().prev_hash,
                    timestamp,
                )
                .commit()
                .await?;

            // Average time between receiving blocks from the peer - used to detect a slow sync peer
            let last_avg_latency = avg_latency.calculate_average_with_min_samples(5);
            if let Some(latency) = last_avg_latency {
                sync_peer.set_latency(latency);
            }
            // Includes time to add block to database, used to show blocks/s on status line
            sync_peer.add_sample(last_sync_timer.elapsed());
            self.hooks
                .call_on_progress_block_hooks(block.clone(), tip_height, &sync_peer);

            if let Some(avg_latency) = last_avg_latency {
                if avg_latency > max_latency {
                    return Err(BlockSyncError::MaxLatencyExceeded {
                        peer: sync_peer.node_id().clone(),
                        latency: avg_latency,
                        max_latency,
                    });
                }
            }

            current_block = Some(block);
            last_sync_timer = Instant::now();
        }
        debug!(
            "Sync peer claim at start  - height: {}, accumulated difficulty: {}",
            sync_peer.claimed_chain_metadata().best_block_height(),
            sync_peer.claimed_chain_metadata().accumulated_difficulty(),
        );
        debug!(
            "Our best header at start  - height: {}, accumulated difficulty: {}",
            best_height,
            chain_header.accumulated_data().total_accumulated_difficulty,
        );
        let metadata_after_sync = self.db.get_chain_metadata().await?;
        debug!(
            "Our best block after sync - height: {}, accumulated difficulty: {}",
            metadata_after_sync.best_block_height(),
            metadata_after_sync.accumulated_difficulty(),
        );

        if metadata_after_sync.accumulated_difficulty() < sync_peer.claimed_chain_metadata().accumulated_difficulty() {
            return Err(BlockSyncError::PeerDidNotSupplyAllClaimedBlocks(format!(
                "Their claim - height: {}, accumulated difficulty: {}. Our status after block sync - height: {}, \
                 accumulated difficulty: {}",
                sync_peer.claimed_chain_metadata().best_block_height(),
                sync_peer.claimed_chain_metadata().accumulated_difficulty(),
                metadata_after_sync.best_block_height(),
                metadata_after_sync.accumulated_difficulty(),
            )));
        }

        if let Some(block) = current_block {
            self.hooks.call_on_complete_hooks(block, best_height);
        }

        debug!(target: LOG_TARGET, "Completed block sync with peer `{sync_peer}`");

        Ok(())
    }

    // Sync peers are also removed from the list of sync peers if the ban duration is longer than the short ban period.
    fn remove_sync_peer(&mut self, node_id: &NodeId) {
        if let Some(pos) = self.sync_peers.iter().position(|p| p.node_id() == node_id) {
            self.sync_peers.remove(pos);
        }
    }

    // Helper function to get the index to the node_id inside of the vec of peers
    fn get_sync_peer_index(&mut self, node_id: &NodeId) -> Option<usize> {
        self.sync_peers.iter().position(|p| p.node_id() == node_id)
    }
}
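
For orientation, below is a minimal, hypothetical sketch of how a caller might construct and drive BlockSynchronizer using only the public methods defined above (new, on_starting, on_progress, on_complete, synchronize). The surrounding handles (config, database, connectivity requester, peer list, validator) are assumed to be supplied by the caller, the function name run_block_sync is invented for illustration, and the hook bodies only log progress; this is not the actual wiring used by the base node.

// Hypothetical driver function: the handles passed in are assumed to already
// exist in the caller's context (e.g. the base node's state machine).
async fn run_block_sync<B: BlockchainBackend + 'static>(
    config: BlockchainSyncConfig,
    db: AsyncBlockchainDb<B>,
    connectivity: ConnectivityRequester,
    sync_peers: &mut Vec<SyncPeer>,
    block_validator: Arc<dyn BlockBodyValidator<B>>,
) -> Result<(), BlockSyncError> {
    let mut synchronizer = BlockSynchronizer::new(config, db, connectivity, sync_peers, block_validator);

    // Register hooks before starting; these illustrative bodies only log progress.
    synchronizer.on_starting(|peer: &SyncPeer| {
        log::info!("Block sync starting with peer {}", peer.node_id());
    });
    synchronizer.on_progress(|block: Arc<ChainBlock>, remote_tip_height: u64, peer: &SyncPeer| {
        log::debug!("Synced block #{} of {} from {}", block.height(), remote_tip_height, peer.node_id());
    });
    synchronizer.on_complete(|block: Arc<ChainBlock>, starting_height: u64| {
        log::info!("Block sync complete at height {} (local best was {})", block.height(), starting_height);
    });

    // `synchronize` retries internally, raising the latency ceiling and rotating
    // through sync peers, until it succeeds or returns a fatal BlockSyncError.
    synchronizer.synchronize().await
}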