tari-project / tari · build 14642108223 (push via GitHub, web-flow)

24 Apr 2025 12:52PM UTC · coverage: 73.398% (+1.1%) from 72.346%

feat(grpc): return best block height from synced node (#6984)

Description
---
Adds `tip_height` and `local_height` to `get_sync_status`.

Motivation and Context
---
TU checks the `initial_sync_achieved` flag from `get_tip_info` and also
calls `get_sync_progress` to get `tip_height`. However, once
`initial_sync_achieved` is true, these two fields are reported as 0, so
the tip cannot be checked after syncing.
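
A minimal sketch of what the extra fields enable on the client side; the struct and helper below are hypothetical stand-ins for the real gRPC response types and exist only to illustrate the check that could not previously be performed once the sync flag was set:

```rust
// Hypothetical shape mirroring only the fields named in this PR; the actual
// gRPC message is defined in the Tari proto files and may differ.
struct SyncStatusInfo {
    initial_sync_achieved: bool,
    tip_height: u64,   // best block height reported by the network
    local_height: u64, // height of the locally synced chain
}

/// With heights reported as non-zero even after the initial sync flag is set,
/// a client can keep measuring how far it is behind the tip.
fn blocks_behind_tip(info: &SyncStatusInfo) -> Option<u64> {
    if info.initial_sync_achieved {
        Some(info.tip_height.saturating_sub(info.local_height))
    } else {
        None // still in initial sync; heights are not meaningful yet
    }
}
```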

How Has This Been Tested?
---

What process can a PR reviewer use to test or verify this change?
---



Breaking Changes
---

- [x] None
- [ ] Requires data directory on base node to be deleted
- [ ] Requires hard fork
- [ ] Other - Please specify

## Summary by CodeRabbit

- **Bug Fixes**
  - Improved accuracy of sync progress reporting by displaying the actual best block height when the node is synced, instead of showing zero values.

81382 of 110877 relevant lines covered (73.4%)

279101.7 hits per line

Source File: /base_layer/core/src/base_node/sync/header_sync/validator.rs (90.37% covered)

//  Copyright 2020, The Tari Project
//
//  Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
//  following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
//  disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
//  following disclaimer in the documentation and/or other materials provided with the distribution.
//
//  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
//  products derived from this software without specific prior written permission.
//
//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
//  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
//  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
//  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
//  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
//  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::cmp::Ordering;

use log::*;
use primitive_types::U256;
use tari_common_types::types::HashOutput;
use tari_utilities::{epoch_time::EpochTime, hex::Hex};

use crate::{
    base_node::sync::{header_sync::HEADER_SYNC_INITIAL_MAX_HEADERS, BlockHeaderSyncError},
    blocks::{BlockHeader, BlockHeaderAccumulatedData, BlockHeaderValidationError, ChainHeader},
    chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, TargetDifficulties},
    common::rolling_vec::RollingVec,
    consensus::ConsensusManager,
    proof_of_work::{randomx_factory::RandomXFactory, PowAlgorithm},
    validation::{header::HeaderFullValidator, DifficultyCalculator, HeaderChainLinkedValidator, ValidationError},
};

const LOG_TARGET: &str = "c::bn::header_sync";

#[derive(Clone)]
pub struct BlockHeaderSyncValidator<B> {
    db: AsyncBlockchainDb<B>,
    state: Option<State>,
    consensus_rules: ConsensusManager,
    validator: HeaderFullValidator,
}

#[derive(Debug, Clone)]
struct State {
    current_height: u64,
    timestamps: RollingVec<EpochTime>,
    target_difficulties: TargetDifficulties,
    previous_accum: BlockHeaderAccumulatedData,
    previous_header: BlockHeader,
    valid_headers: Vec<ChainHeader>,
}

impl<B: BlockchainBackend + 'static> BlockHeaderSyncValidator<B> {
    pub fn new(db: AsyncBlockchainDb<B>, consensus_rules: ConsensusManager, randomx_factory: RandomXFactory) -> Self {
        let difficulty_calculator = DifficultyCalculator::new(consensus_rules.clone(), randomx_factory);
        let validator = HeaderFullValidator::new(consensus_rules.clone(), difficulty_calculator);
        Self {
            db,
            state: None,
            consensus_rules,
            validator,
        }
    }

    #[allow(clippy::ptr_arg)]
    pub async fn initialize_state(&mut self, start_hash: &HashOutput) -> Result<(), BlockHeaderSyncError> {
        let start_header = self
            .db
            .fetch_header_by_block_hash(*start_hash)
            .await?
            .ok_or_else(|| BlockHeaderSyncError::StartHashNotFound(start_hash.to_hex()))?;
        let timestamps = self.db.fetch_block_timestamps(*start_hash).await?;
        let target_difficulties = self.db.fetch_target_difficulties_for_next_block(*start_hash).await?;
        let previous_accum = self
            .db
            .fetch_header_accumulated_data(*start_hash)
            .await?
            .ok_or_else(|| ChainStorageError::ValueNotFound {
                entity: "BlockHeaderAccumulatedData",
                field: "hash",
                value: start_hash.to_hex(),
            })?;
        debug!(
            target: LOG_TARGET,
            "Setting header validator state ({} timestamp(s), target difficulties: {} SHA3, {} RandomX)",
            timestamps.len(),
            target_difficulties.get(PowAlgorithm::Sha3x).len(),
            target_difficulties.get(PowAlgorithm::RandomX).len(),
        );
        self.state = Some(State {
            current_height: start_header.height,
            timestamps,
            target_difficulties,
            previous_accum,
            previous_header: start_header,
            // One large allocation is usually better even if it is not always used.
            valid_headers: Vec::with_capacity(HEADER_SYNC_INITIAL_MAX_HEADERS),
        });

        Ok(())
    }

    pub fn current_valid_chain_tip_header(&self) -> Option<&ChainHeader> {
        self.valid_headers().last()
    }

    pub async fn validate(&mut self, header: BlockHeader) -> Result<U256, BlockHeaderSyncError> {
        let state = self.state();
        let constants = self.consensus_rules.consensus_constants(header.height);

        let target_difficulty = state.target_difficulties.get(header.pow_algo()).calculate(
            constants.min_pow_difficulty(header.pow_algo()),
            constants.max_pow_difficulty(header.pow_algo()),
        );

        let result = {
            let txn = self.db.inner().db_read_access()?;
            self.validator.validate(
                &*txn,
                &header,
                &state.previous_header,
                &state.timestamps,
                Some(target_difficulty),
            )
        };
        let achieved_target = match result {
            Ok(achieved_target) => achieved_target,
            // future timelimit validation can succeed at a later time. As the block is not yet valid, we discard it
            // for now and ban the peer, but wont blacklist the block.
            Err(e @ ValidationError::BlockHeaderError(BlockHeaderValidationError::InvalidTimestampFutureTimeLimit)) => {
                return Err(e.into())
            },
            // We dont want to mark a block as bad for internal failures
            Err(
                e @ ValidationError::FatalStorageError(_) |
                e @ ValidationError::IncorrectNumberOfTimestampsProvided { .. },
            ) => return Err(e.into()),
            // We dont have to mark the block twice
            Err(e @ ValidationError::BadBlockFound { .. }) => return Err(e.into()),

            Err(e) => {
                let mut txn = self.db.write_transaction();
                txn.insert_bad_block(header.hash(), header.height, e.to_string());
                txn.commit().await?;
                return Err(e.into());
            },
        };

        // Header is valid, add this header onto the validation state for the next round
        // Mutable borrow done later in the function to allow multiple immutable borrows before this line. This has
        // nothing to do with locking or concurrency.
        let state = self.state_mut();
        state.previous_header = header.clone();

        // Ensure that timestamps are inserted in sorted order
        let maybe_index = state.timestamps.iter().position(|ts| *ts >= header.timestamp());
        match maybe_index {
            Some(pos) => {
                state.timestamps.insert(pos, header.timestamp());
            },
            None => {
                state.timestamps.push(header.timestamp());
            },
        }

        state.current_height = header.height;
        // Add a "more recent" datapoint onto the target difficulty
        state.target_difficulties.add_back(&header, target_difficulty);

        let accumulated_data = BlockHeaderAccumulatedData::builder(&state.previous_accum)
            .with_hash(header.hash())
            .with_achieved_target_difficulty(achieved_target)
            .with_total_kernel_offset(header.total_kernel_offset.clone())
            .build()?;

        let total_accumulated_difficulty = accumulated_data.total_accumulated_difficulty;
        // NOTE: accumulated_data constructed from header so they are guaranteed to correspond
        let chain_header = ChainHeader::try_construct(header, accumulated_data).unwrap();

        state.previous_accum = chain_header.accumulated_data().clone();
        state.valid_headers.push(chain_header);

        Ok(total_accumulated_difficulty)
    }

    /// Drains and returns all the headers that were validated.
    ///
    /// ## Panics
    ///
    /// Panics if initialize_state was not called prior to calling this function
    pub fn take_valid_headers(&mut self) -> Vec<ChainHeader> {
        self.state_mut().valid_headers.drain(..).collect::<Vec<_>>()
    }

    /// Returns a slice containing the current valid headers
    ///
    /// ## Panics
    ///
    /// Panics if initialize_state was not called prior to calling this function
    pub fn valid_headers(&self) -> &[ChainHeader] {
        &self.state().valid_headers
    }

    pub fn compare_chains(&self, our_header: &ChainHeader, their_header: &ChainHeader) -> Ordering {
        debug!(
            target: LOG_TARGET,
            "Comparing PoW on remote header #{} and local header #{}",
            their_header.height(),
            our_header.height()
        );

        self.consensus_rules
            .chain_strength_comparer()
            .compare(our_header, their_header)
    }

    fn state_mut(&mut self) -> &mut State {
        self.state
            .as_mut()
            .expect("state_mut() called before state was initialized (using the `begin` method)")
    }

    fn state(&self) -> &State {
        self.state
            .as_ref()
            .expect("state() called before state was initialized (using the `begin` method)")
    }
}

#[cfg(test)]
mod test {
    use tari_common::configuration::Network;
    use tari_test_utils::unpack_enum;

    use super::*;
    use crate::{
        blocks::BlockHeader,
        proof_of_work::PowAlgorithm,
        test_helpers::blockchain::{create_new_blockchain, TempDatabase},
    };

    fn setup() -> (
        BlockHeaderSyncValidator<TempDatabase>,
        AsyncBlockchainDb<TempDatabase>,
        ConsensusManager,
    ) {
        let rules = ConsensusManager::builder(Network::LocalNet).build().unwrap();
        let randomx_factory = RandomXFactory::default();
        let db = create_new_blockchain();
        (
            BlockHeaderSyncValidator::new(db.clone().into(), rules.clone(), randomx_factory),
            db.into(),
            rules,
        )
    }

    async fn setup_with_headers(
        n: usize,
    ) -> (
        BlockHeaderSyncValidator<TempDatabase>,
        AsyncBlockchainDb<TempDatabase>,
        ChainHeader,
    ) {
        let (validator, db, cm) = setup();
        let mut tip = db.fetch_tip_header().await.unwrap();
        for _ in 0..n {
            let mut header = BlockHeader::from_previous(tip.header());
            header.version = cm.consensus_constants(header.height).blockchain_version();
            // Needed to have unique keys for the blockchain db mmr count indexes (MDB_KEY_EXIST error)
            header.kernel_mmr_size += 1;
            header.output_smt_size += 1;
            let acc_data = BlockHeaderAccumulatedData {
                hash: header.hash(),
                ..Default::default()
            };

            let chain_header = ChainHeader::try_construct(header.clone(), acc_data.clone()).unwrap();
            db.insert_valid_headers(vec![chain_header.clone()]).await.unwrap();
            tip = chain_header;
        }

        (validator, db, tip)
    }

    mod initialize_state {
        use std::convert::TryInto;

        use super::*;

        #[tokio::test]
        async fn it_initializes_state_to_given_header() {
            let (mut validator, _, tip) = setup_with_headers(1).await;
            validator.initialize_state(&tip.header().hash()).await.unwrap();
            let state = validator.state();
            assert!(state.valid_headers.is_empty());
            assert_eq!(state.target_difficulties.get(PowAlgorithm::Sha3x).len(), 2);
            assert!(state.target_difficulties.get(PowAlgorithm::RandomX).is_empty());
            assert_eq!(state.timestamps.len(), 2);
            assert_eq!(state.current_height, 1);
        }

        #[tokio::test]
        async fn it_errors_if_hash_does_not_exist() {
            let (mut validator, _, _cm) = setup();
            let start_hash = vec![0; 32];
            let err = validator
                .initialize_state(&start_hash.clone().try_into().unwrap())
                .await
                .unwrap_err();
            unpack_enum!(BlockHeaderSyncError::StartHashNotFound(hash) = err);
            assert_eq!(hash, start_hash.to_hex());
        }
    }

    mod validate {
        use super::*;

        #[tokio::test]
        async fn it_passes_if_headers_are_valid() {
            let (mut validator, _, tip) = setup_with_headers(1).await;
            validator.initialize_state(tip.hash()).await.unwrap();
            assert!(validator.valid_headers().is_empty());
            let mut next = BlockHeader::from_previous(tip.header());
            next.timestamp = tip.header().timestamp.checked_add(EpochTime::from(1)).unwrap();
            validator.validate(next).await.unwrap();
            assert_eq!(validator.valid_headers().len(), 1);
            let tip = validator.valid_headers().last().cloned().unwrap();
            let mut next = BlockHeader::from_previous(tip.header());
            next.timestamp = tip.header().timestamp.checked_add(EpochTime::from(1)).unwrap();
            validator.validate(next).await.unwrap();
            assert_eq!(validator.valid_headers().len(), 2);
        }

        #[tokio::test]
        async fn it_fails_if_height_is_not_serial() {
            let (mut validator, _, tip) = setup_with_headers(12).await;
            validator.initialize_state(tip.hash()).await.unwrap();
            let mut next = BlockHeader::from_previous(tip.header());
            next.height = 14;
            let err = validator.validate(next).await.unwrap_err();
            unpack_enum!(BlockHeaderSyncError::ValidationFailed(val_err) = err);
            unpack_enum!(ValidationError::BlockHeaderError(header_err) = val_err);
            unpack_enum!(BlockHeaderValidationError::InvalidHeight { actual, expected } = header_err);
            assert_eq!(actual, 14);
            assert_eq!(expected, 13);
        }
    }
}
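
One detail in `validate()` that is easy to skim past is the timestamp bookkeeping: each accepted header's timestamp is spliced into the rolling window in sorted order so the timestamp slice handed to `HeaderFullValidator` for the next header stays ordered. A minimal, self-contained sketch of that insert-or-append pattern, using a plain `Vec<u64>` in place of `RollingVec<EpochTime>` so it runs outside the Tari crates:

```rust
/// Insert `new_ts` before the first element that is >= it, otherwise append.
/// This mirrors the `position(..)` + `insert`/`push` logic in `validate()` above;
/// the `Vec<u64>` stand-in keeps the example runnable on its own.
fn insert_sorted(timestamps: &mut Vec<u64>, new_ts: u64) {
    match timestamps.iter().position(|ts| *ts >= new_ts) {
        Some(pos) => timestamps.insert(pos, new_ts),
        None => timestamps.push(new_ts),
    }
}

fn main() {
    let mut ts = vec![10, 20, 30];
    insert_sorted(&mut ts, 25); // lands between 20 and 30
    insert_sorted(&mut ts, 40); // appended at the end
    assert_eq!(ts, vec![10, 20, 25, 30, 40]);
}
```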