• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 23555445100

25 Mar 2026 05:42PM UTC coverage: 85.764% (-0.08%) from 85.84%
23555445100

Pull #7036

github

web-flow
Merge 3c5950a49 into 91ed8c178
Pull Request #7036: Feat/improved const callable

28 of 29 new or added lines in 5 files covered. (96.55%)

303 existing lines in 41 files now uncovered.

187402 of 218510 relevant lines covered (85.76%)

17120754.47 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.81
/stacks-node/src/run_loop/neon.rs
1
#[cfg(test)]
2
use std::sync::atomic::AtomicU64;
3
use std::sync::atomic::{AtomicBool, Ordering};
4
use std::sync::mpsc::sync_channel;
5
use std::sync::{Arc, Mutex};
6
use std::thread;
7
use std::thread::JoinHandle;
8

9
use libc;
10
use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType};
11
use stacks::burnchains::{Burnchain, Error as burnchain_error};
12
use stacks::chainstate::burn::db::sortdb::SortitionDB;
13
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
14
use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers};
15
use stacks::chainstate::coordinator::{
16
    migrate_chainstate_dbs, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication,
17
    Error as coord_error,
18
};
19
use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
20
use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
21
use stacks::core::StacksEpochId;
22
use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment};
23
#[cfg(test)]
24
use stacks::util::tests::TestFlag;
25
use stacks::util_lib::db::Error as db_error;
26
use stacks_common::deps_common::ctrlc as termination;
27
use stacks_common::deps_common::ctrlc::SignalId;
28
use stacks_common::types::PublicKey;
29
use stacks_common::util::hash::Hash160;
30
use stx_genesis::GenesisData;
31

32
use super::RunLoopCallbacks;
33
use crate::burnchains::{make_bitcoin_indexer, Error};
34
use crate::globals::NeonGlobals as Globals;
35
use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError};
36
use crate::neon_node::{
37
    LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER,
38
};
39
use crate::node::{
40
    get_account_balances, get_account_lockups, get_names, get_namespaces,
41
    use_test_genesis_chainstate,
42
};
43
use crate::run_loop::boot_nakamoto::Neon2NakaData;
44
use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms};
45
use crate::{
46
    run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain,
47
};
48

49
/// File descriptor number for standard error; used with the raw `libc::write`
/// calls in `async_safe_write_stderr` (see signal-safety(7)).
pub const STDERR: i32 = 2;

/// A run-loop field observable by the test framework: in test builds it is a
/// real shared, mutex-guarded value.
#[cfg(test)]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub Arc<Mutex<T>>);

/// In non-test builds the field carries no data (zero-sized); all updates to
/// it are compiled out (see `Counters::update`).
#[cfg(not(test))]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub std::marker::PhantomData<T>);

/// A run-loop progress counter observable by the test framework: in test
/// builds it is a real shared atomic counter.
#[cfg(test)]
#[derive(Clone)]
pub struct RunLoopCounter(pub Arc<AtomicU64>);

/// In non-test builds the counter is zero-sized; all increments/stores are
/// compiled out (see `Counters::inc` / `Counters::set`).
#[cfg(not(test))]
#[derive(Clone)]
pub struct RunLoopCounter();
66

67
impl Default for RunLoopCounter {
68
    #[cfg(test)]
69
    fn default() -> Self {
5,943✔
70
        RunLoopCounter(Arc::new(AtomicU64::new(0)))
5,943✔
71
    }
5,943✔
72
    #[cfg(not(test))]
73
    fn default() -> Self {
74
        Self()
75
    }
76
}
77

78
impl<T: Clone> RunLoopField<Option<T>> {
79
    #[cfg(test)]
80
    pub fn get(&self) -> T {
1✔
81
        self.0.lock().unwrap().clone().unwrap()
1✔
82
    }
1✔
83
}
84

85
impl RunLoopCounter {
86
    #[cfg(test)]
87
    pub fn get(&self) -> u64 {
3,011✔
88
        self.0.load(Ordering::SeqCst)
3,011✔
89
    }
3,011✔
90

91
    #[cfg(test)]
92
    pub fn load(&self, ordering: Ordering) -> u64 {
64,100✔
93
        self.0.load(ordering)
64,100✔
94
    }
64,100✔
95
}
96

97
/// In test builds, let a `RunLoopCounter` be used directly as its inner
/// `Arc<AtomicU64>` (e.g. `counter.fetch_add(...)` from integration tests).
#[cfg(test)]
impl std::ops::Deref for RunLoopCounter {
    type Target = Arc<AtomicU64>;

    fn deref(&self) -> &Self::Target {
        let RunLoopCounter(inner) = self;
        inner
    }
}
105

106
/// Progress counters bumped by the run loop so that other threads holding a
/// clone (notably the test framework) can observe node activity.  Cheap to
/// `Clone`; in non-test builds every counter is zero-sized and all updates
/// are compiled out.
#[derive(Clone, Default)]
pub struct Counters {
    // Neon (epoch 2.x) progress counters.
    pub blocks_processed: RunLoopCounter,
    pub microblocks_processed: RunLoopCounter,
    pub missed_tenures: RunLoopCounter,
    pub missed_microblock_tenures: RunLoopCounter,
    pub cancelled_commits: RunLoopCounter,

    // Number of sortitions processed.
    pub sortitions_processed: RunLoopCounter,

    // Nakamoto-era counters ("naka_" prefix) and remaining neon commit counters.
    pub naka_submitted_vrfs: RunLoopCounter,
    /// the number of submitted commits
    pub neon_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub neon_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub naka_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_mined_blocks: RunLoopCounter,
    pub naka_rejected_blocks: RunLoopCounter,
    pub naka_proposed_blocks: RunLoopCounter,
    pub naka_mined_tenures: RunLoopCounter,
    pub naka_signer_pushed_blocks: RunLoopCounter,
    pub naka_miner_directives: RunLoopCounter,
    // Stacks tip height / sats amount recorded with the last Nakamoto commit.
    pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
    pub naka_submitted_commit_last_commit_amount: RunLoopCounter,
    // Parent tenure ID of the last submitted Nakamoto commit (test builds only
    // hold a real value; see RunLoopField).
    pub naka_submitted_commit_last_parent_tenure_id: RunLoopField<Option<ConsensusHash>>,

    // Current miner rejection count and its timeout, as last reported via the
    // set_miner_current_rejections* methods.
    pub naka_miner_current_rejections: RunLoopCounter,
    pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,

    // Test-only switch to suppress block-commit submission.
    #[cfg(test)]
    pub skip_commit_op: TestFlag<bool>,
}
140

141
impl Counters {
142
    pub fn new() -> Self {
1✔
143
        Self::default()
1✔
144
    }
1✔
145

146
    #[cfg(test)]
147
    fn inc(ctr: &RunLoopCounter) {
32,654✔
148
        ctr.0.fetch_add(1, Ordering::SeqCst);
32,654✔
149
    }
32,654✔
150

151
    #[cfg(not(test))]
152
    fn inc(_ctr: &RunLoopCounter) {}
153

154
    #[cfg(test)]
155
    fn set(ctr: &RunLoopCounter, value: u64) {
11,931✔
156
        ctr.0.store(value, Ordering::SeqCst);
11,931✔
157
    }
11,931✔
158

159
    #[cfg(not(test))]
160
    fn set(_ctr: &RunLoopCounter, _value: u64) {}
161

162
    #[cfg(test)]
163
    fn update<T: Clone>(ctr: &RunLoopField<Option<T>>, value: &T) {
1,644✔
164
        let mut mutex = ctr.0.lock().expect("FATAL: test counter mutext poisoned");
1,644✔
165
        let _ = mutex.replace(value.clone());
1,644✔
166
    }
1,644✔
167

168
    #[cfg(not(test))]
169
    fn update<T: Clone>(_ctr: &RunLoopField<Option<T>>, _value: &T) {}
170

171
    pub fn bump_blocks_processed(&self) {
11,715✔
172
        Counters::inc(&self.blocks_processed);
11,715✔
173
    }
11,715✔
174

175
    pub fn bump_sortitions_processed(&self) {
4,270✔
176
        Counters::inc(&self.sortitions_processed);
4,270✔
177
    }
4,270✔
178

179
    pub fn bump_microblocks_processed(&self) {
×
180
        Counters::inc(&self.microblocks_processed);
×
181
    }
×
182

183
    pub fn bump_missed_tenures(&self) {
1,078✔
184
        Counters::inc(&self.missed_tenures);
1,078✔
185
    }
1,078✔
186

187
    pub fn bump_missed_microblock_tenures(&self) {
×
188
        Counters::inc(&self.missed_microblock_tenures);
×
189
    }
×
190

191
    pub fn bump_cancelled_commits(&self) {
×
192
        Counters::inc(&self.cancelled_commits);
×
193
    }
×
194

195
    pub fn bump_neon_submitted_commits(&self, committed_burn_height: u64) {
6,630✔
196
        Counters::inc(&self.neon_submitted_commits);
6,630✔
197
        Counters::set(
6,630✔
198
            &self.neon_submitted_commit_last_burn_height,
6,630✔
199
            committed_burn_height,
6,630✔
200
        );
201
    }
6,630✔
202

203
    pub fn bump_naka_submitted_vrfs(&self) {
37✔
204
        Counters::inc(&self.naka_submitted_vrfs);
37✔
205
    }
37✔
206

207
    pub fn bump_naka_submitted_commits(
1,644✔
208
        &self,
1,644✔
209
        committed_burn_height: u64,
1,644✔
210
        committed_stacks_height: u64,
1,644✔
211
        committed_sats_amount: u64,
1,644✔
212
        committed_parent_tenure_id: &ConsensusHash,
1,644✔
213
    ) {
1,644✔
214
        Counters::inc(&self.naka_submitted_commits);
1,644✔
215
        Counters::set(
1,644✔
216
            &self.naka_submitted_commit_last_burn_height,
1,644✔
217
            committed_burn_height,
1,644✔
218
        );
219
        Counters::set(
1,644✔
220
            &self.naka_submitted_commit_last_stacks_tip,
1,644✔
221
            committed_stacks_height,
1,644✔
222
        );
223
        Counters::set(
1,644✔
224
            &self.naka_submitted_commit_last_commit_amount,
1,644✔
225
            committed_sats_amount,
1,644✔
226
        );
227
        Counters::update(
1,644✔
228
            &self.naka_submitted_commit_last_parent_tenure_id,
1,644✔
229
            committed_parent_tenure_id,
1,644✔
230
        );
231
    }
1,644✔
232

233
    pub fn bump_naka_mined_blocks(&self) {
2,194✔
234
        Counters::inc(&self.naka_mined_blocks);
2,194✔
235
    }
2,194✔
236

237
    pub fn bump_naka_proposed_blocks(&self) {
2,281✔
238
        Counters::inc(&self.naka_proposed_blocks);
2,281✔
239
    }
2,281✔
240

241
    pub fn bump_naka_rejected_blocks(&self) {
91✔
242
        Counters::inc(&self.naka_rejected_blocks);
91✔
243
    }
91✔
244

245
    pub fn bump_naka_signer_pushed_blocks(&self) {
11✔
246
        Counters::inc(&self.naka_signer_pushed_blocks);
11✔
247
    }
11✔
248

249
    pub fn bump_naka_mined_tenures(&self) {
1,253✔
250
        Counters::inc(&self.naka_mined_tenures);
1,253✔
251
    }
1,253✔
252

253
    pub fn bump_naka_miner_directives(&self) {
1,450✔
254
        Counters::inc(&self.naka_miner_directives);
1,450✔
255
    }
1,450✔
256

257
    pub fn set_microblocks_processed(&self, value: u64) {
105✔
258
        Counters::set(&self.microblocks_processed, value)
105✔
259
    }
105✔
260

261
    pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) {
132✔
262
        Counters::set(&self.naka_miner_current_rejections_timeout_secs, value)
132✔
263
    }
132✔
264

265
    pub fn set_miner_current_rejections(&self, value: u32) {
132✔
266
        Counters::set(&self.naka_miner_current_rejections, u64::from(value))
132✔
267
    }
132✔
268
}
269

270
/// Coordinating a node running in neon mode.
pub struct RunLoop {
    // Node configuration; immutable for the life of the run loop.
    config: Config,
    // Hooks invoked at various points of the run loop (public so embedders can set them).
    pub callbacks: RunLoopCallbacks,
    // Shared state handed to worker threads; set via set_globals() during start().
    globals: Option<Globals>,
    // Progress counters shared with tests and other threads.
    counters: Counters,
    // Both ends of the coordinator channel; the receiver side is consumed when
    // the chains coordinator thread is spawned.
    coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>,
    // Cleared by the termination handler to request a graceful shutdown.
    should_keep_running: Arc<AtomicBool>,
    // Dispatches chainstate events to registered observers.
    event_dispatcher: EventDispatcher,
    pox_watchdog: Option<PoxSyncWatchdog>, // can't be instantiated until .start() is called
    is_miner: Option<bool>,                // not known until .start() is called
    burnchain: Option<Burnchain>,          // not known until .start() is called
    // Comms handle for the PoX sync watchdog.
    pox_watchdog_comms: PoxSyncWatchdogComms,
    /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is
    /// instantiated (namely, so the test framework can access it).
    miner_status: Arc<Mutex<MinerStatus>>,
    // Handle to the Prometheus metrics thread, if one was started.
    monitoring_thread: Option<JoinHandle<Result<(), MonitoringError>>>,
}
288

289
/// Write to stderr in an async-safe manner.
/// See signal-safety(7)
///
/// Uses only the raw `write(2)` syscall -- no allocation, locking, or
/// buffering -- so it may be called from a signal handler.  Any error from
/// `write` is deliberately ignored: there is nothing safe to do about it here.
fn async_safe_write_stderr(msg: &str) {
    #[cfg(windows)]
    unsafe {
        // write(2) inexplicably has a different ABI only on Windows.
        // SAFETY: msg's pointer/length describe a valid, live buffer.
        libc::write(
            STDERR,
            msg.as_ptr() as *const libc::c_void,
            msg.len() as u32,
        );
    }
    #[cfg(not(windows))]
    unsafe {
        // SAFETY: msg's pointer/length describe a valid, live buffer.
        libc::write(STDERR, msg.as_ptr() as *const libc::c_void, msg.len());
    }
}
×
306

307
impl RunLoop {
308
    /// Sets up a runloop and node, given a config.
309
    pub fn new(config: Config) -> Self {
280✔
310
        let channels = CoordinatorCommunication::instantiate();
280✔
311
        let should_keep_running = Arc::new(AtomicBool::new(true));
280✔
312
        let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone());
280✔
313
        let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(
280✔
314
            config.burnchain.burn_fee_cap,
280✔
315
        )));
316

317
        let mut event_dispatcher = EventDispatcher::new(config.get_working_dir());
280✔
318
        for observer in config.events_observers.iter() {
980✔
319
            event_dispatcher.register_observer(observer);
955✔
320
        }
955✔
321
        event_dispatcher.process_pending_payloads();
280✔
322

323
        Self {
280✔
324
            config,
280✔
325
            globals: None,
280✔
326
            coordinator_channels: Some(channels),
280✔
327
            callbacks: RunLoopCallbacks::new(),
280✔
328
            counters: Counters::default(),
280✔
329
            should_keep_running,
280✔
330
            event_dispatcher,
280✔
331
            pox_watchdog: None,
280✔
332
            is_miner: None,
280✔
333
            burnchain: None,
280✔
334
            pox_watchdog_comms,
280✔
335
            miner_status,
280✔
336
            monitoring_thread: None,
280✔
337
        }
280✔
338
    }
280✔
339

340
    /// Return a clone of the shared globals.
    /// Panics if `start()` has not instantiated them yet.
    pub fn get_globals(&self) -> Globals {
        self.globals
            .clone()
            .expect("FATAL: globals not instantiated")
    }

    /// Install the shared globals (called during `start()`).
    fn set_globals(&mut self, globals: Globals) {
        self.globals = Some(globals);
    }

    /// Clone of the coordinator's sender channels, if they have not been
    /// consumed yet.
    pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> {
        self.coordinator_channels.as_ref().map(|x| x.1.clone())
    }

    /// Shared handle to the blocks-processed counter.
    pub fn get_blocks_processed_arc(&self) -> RunLoopCounter {
        self.counters.blocks_processed.clone()
    }

    /// Shared handle to the microblocks-processed counter.
    pub fn get_microblocks_processed_arc(&self) -> RunLoopCounter {
        self.counters.microblocks_processed.clone()
    }

    /// Shared handle to the missed-tenures counter.
    pub fn get_missed_tenures_arc(&self) -> RunLoopCounter {
        self.counters.missed_tenures.clone()
    }

    /// Shared handle to the missed-microblock-tenures counter.
    pub fn get_missed_microblock_tenures_arc(&self) -> RunLoopCounter {
        self.counters.missed_microblock_tenures.clone()
    }

    /// Shared handle to the cancelled-commits counter.
    pub fn get_cancelled_commits_arc(&self) -> RunLoopCounter {
        self.counters.cancelled_commits.clone()
    }

    /// Clone of the full counter set.
    pub fn get_counters(&self) -> Counters {
        self.counters.clone()
    }

    /// Borrow the node configuration.
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// Clone of the event dispatcher.
    pub fn get_event_dispatcher(&self) -> EventDispatcher {
        self.event_dispatcher.clone()
    }

    /// Whether this node runs as a miner.
    /// Returns false if `start()` has not yet determined miner status.
    pub fn is_miner(&self) -> bool {
        self.is_miner.unwrap_or(false)
    }

    /// Clone of the PoX sync watchdog comms handle.
    pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms {
        self.pox_watchdog_comms.clone()
    }

    /// Shared shutdown flag; clearing it requests a graceful stop.
    pub fn get_termination_switch(&self) -> Arc<AtomicBool> {
        self.should_keep_running.clone()
    }

    /// Clone of the burnchain.
    /// Panics if called before `start()`.
    pub fn get_burnchain(&self) -> Burnchain {
        self.burnchain
            .clone()
            .expect("FATAL: tried to get runloop burnchain before calling .start()")
    }

    /// Mutable borrow of the PoX sync watchdog.
    /// Panics if called before `start()`.
    pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog {
        self.pox_watchdog
            .as_mut()
            .expect("FATAL: tried to get PoX watchdog before calling .start()")
    }

    /// Shared handle to the miner status (accessible before globals exist).
    pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> {
        self.miner_status.clone()
    }
277✔
413

414
    /// Set up termination handler.  Have a signal set the `should_keep_running` atomic bool to
    /// false.  Panics if called more than once.
    ///
    /// SIGBUS aborts the process immediately (see the message written below);
    /// any other handled signal clears `keep_running_writer` so the run loop
    /// winds down gracefully.  A failure to install the handler is tolerated
    /// under `cfg!(test)` or when `allow_err` is true (one may already be
    /// installed); otherwise the process panics.
    pub fn setup_termination_handler(keep_running_writer: Arc<AtomicBool>, allow_err: bool) {
        let install = termination::set_handler(move |sig_id| match sig_id {
            SignalId::Bus => {
                let msg = "Caught SIGBUS; crashing immediately and dumping core\n";
                // only async-signal-safe calls on this path (raw write + abort)
                async_safe_write_stderr(msg);
                unsafe {
                    libc::abort();
                }
            }
            _ => {
                // NOTE(review): format! allocates, which is not async-signal-safe;
                // confirm this handler runs outside true signal context.
                let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n");
                async_safe_write_stderr(&msg);
                keep_running_writer.store(false, Ordering::SeqCst);
            }
        });

        if let Err(e) = install {
            // integration tests can do this
            if cfg!(test) || allow_err {
                info!("Error setting up signal handler, may have already been set");
            } else {
                panic!("FATAL: error setting termination handler - {e}");
            }
        }
    }
518✔
441

442
    /// Seconds to wait before retrying UTXO check during startup
    const UTXO_RETRY_INTERVAL: u64 = 10;
    /// Number of times to retry UTXO check during startup
    const UTXO_RETRY_COUNT: u64 = 6;

    /// Determine if we're the miner.
    /// If there's a network error, then assume that we're not a miner.
    ///
    /// Mock-mining nodes are always miners.  Otherwise a node with
    /// `node.miner` set is a miner only if a UTXO is visible at one of the
    /// miner key's bitcoin addresses (legacy p2pkh, plus segwit p2wpkh when
    /// `miner.segwit` is enabled).  The check is retried `UTXO_RETRY_COUNT`
    /// times, `UTXO_RETRY_INTERVAL` seconds apart, in case bitcoind is still
    /// starting up.
    ///
    /// Panics if `node.miner` is set but no UTXOs are found after all retries.
    fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool {
        if self.config.node.miner {
            // If we are mock mining, then we don't need to check for UTXOs and
            // we can just return true.
            if self.config.get_node_config(false).mock_mining {
                return true;
            }
            let keychain = Keychain::default(self.config.node.seed.clone());
            let mut op_signer = keychain.generate_op_signer();
            // best-effort: a failure here will surface below as missing UTXOs
            if let Err(e) = burnchain.create_wallet_if_dne() {
                warn!("Error when creating wallet: {e:?}");
            }
            // (epoch, address) pairs to probe for UTXOs
            let mut btc_addrs = vec![(
                StacksEpochId::Epoch2_05,
                // legacy
                BitcoinAddress::from_bytes_legacy(
                    self.config.burnchain.get_bitcoin_network().1,
                    LegacyBitcoinAddressType::PublicKeyHash,
                    &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0,
                )
                .expect("FATAL: failed to construct legacy bitcoin address"),
            )];
            if self.config.miner.segwit {
                btc_addrs.push((
                    StacksEpochId::Epoch21,
                    // segwit p2wpkh
                    BitcoinAddress::from_bytes_segwit_p2wpkh(
                        self.config.burnchain.get_bitcoin_network().1,
                        &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0,
                    )
                    .expect("FATAL: failed to construct segwit p2wpkh address"),
                ));
            }

            // retry UTXO check a few times, in case bitcoind is still starting up
            for _ in 0..Self::UTXO_RETRY_COUNT {
                for (epoch_id, btc_addr) in &btc_addrs {
                    info!("Miner node: checking UTXOs at address: {btc_addr}");
                    let utxos =
                        burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0);
                    if utxos.is_none() {
                        warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)");
                    } else {
                        info!("UTXOs found - will run as a Miner node");
                        return true;
                    }
                }
                thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL));
            }
            panic!("No UTXOs found, exiting");
        } else {
            info!("Will run as a Follower node");
            false
        }
    }
277✔
504

505
    /// Instantiate the burnchain client and databases.
    /// Fetches headers and instantiates the burnchain.
    /// Panics on failure.
    ///
    /// Steps, in order: build the controller, validate epoch settings,
    /// migrate existing chainstate DBs, sync bitcoin headers to the target
    /// height, connect the DBs, and force sortdb instantiation.  Returns
    /// `Err(burnchain_error::ShutdownInitiated)` only when a shutdown was
    /// requested while the controller was starting.
    pub fn instantiate_burnchain_state(
        config: &Config,
        should_keep_running: Arc<AtomicBool>,
        burnchain_opt: Option<Burnchain>,
        coordinator_senders: CoordinatorChannels,
    ) -> Result<BitcoinRegtestController, burnchain_error> {
        // Initialize and start the burnchain.
        let mut burnchain_controller = BitcoinRegtestController::with_burnchain(
            config.clone(),
            Some(coordinator_senders),
            burnchain_opt,
            Some(should_keep_running.clone()),
        );

        let burnchain = burnchain_controller.get_burnchain();
        let epochs = burnchain_controller.get_stacks_epochs();

        // sanity check -- epoch data must be valid
        Config::assert_valid_epoch_settings(&burnchain, &epochs);

        // Upgrade chainstate databases if they exist already
        // NOTE: this has to be done before the subsequent call to
        // `burnchain_controller.connect_dbs()` below!
        match migrate_chainstate_dbs(
            &epochs,
            &burnchain,
            &config.get_burn_db_file_path(),
            &config.get_chainstate_path_str(),
            Some(config.node.get_marf_opts()),
        ) {
            Ok(_) => {}
            Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
                error!(
                    "FATAL: chainstate database(s) are not compatible with the current system epoch"
                );
                panic!();
            }
            Err(e) => {
                panic!("FATAL: unable to query filesystem or databases: {e:?}");
            }
        }

        info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while");

        // Pick the header height to sync to: one past the highest known burn
        // block, or one past the first burnchain block if the DB is empty.
        let burnchain_config = burnchain_controller.get_burnchain();
        let target_burnchain_block_height = match burnchain_config
            .get_highest_burnchain_block()
            .expect("FATAL: failed to access burnchain database")
        {
            Some(burnchain_tip) => {
                // database exists already, and has blocks -- just sync to its tip.
                let target_height = burnchain_tip.block_height + 1;
                debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height);
                target_height
            }
            None => {
                // database does not exist yet
                let target_height = 1.max(burnchain_config.first_block_height + 1);
                debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}");
                target_height
            }
        };

        burnchain_controller
            .start(Some(target_burnchain_block_height))
            .map_err(|e| {
                // a coordinator-closed error during a requested shutdown is
                // expected; anything else is fatal
                if matches!(e, Error::CoordinatorClosed)
                    && !should_keep_running.load(Ordering::SeqCst)
                {
                    info!("Shutdown initiated during burnchain initialization: {e}");
                    return burnchain_error::ShutdownInitiated;
                }
                error!("Burnchain controller stopped: {e}");
                panic!();
            })?;

        // if the chainstate DBs don't exist, this will instantiate them
        if let Err(e) = burnchain_controller.connect_dbs() {
            error!("Failed to connect to burnchain databases: {e}");
            panic!();
        };

        // TODO (hack) instantiate the sortdb in the burnchain
        let _ = burnchain_controller.sortdb_mut();
        Ok(burnchain_controller)
    }
594

595
    /// Boot up the stacks chainstate.
596
    /// Instantiate the chainstate and push out the boot receipts to observers
597
    /// This is only public so we can test it.
598
    pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState {
278✔
599
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);
278✔
600

601
        // load up genesis balances
602
        let initial_balances = self
278✔
603
            .config
278✔
604
            .initial_balances
278✔
605
            .iter()
278✔
606
            .map(|e| (e.address.clone(), e.amount))
2,123✔
607
            .collect();
278✔
608

609
        // instantiate chainstate
610
        let mut boot_data = ChainStateBootData {
278✔
611
            initial_balances,
278✔
612
            post_flight_callback: None,
278✔
613
            first_burnchain_block_hash: burnchain_config.first_block_hash.clone(),
278✔
614
            first_burnchain_block_height: burnchain_config.first_block_height as u32,
278✔
615
            first_burnchain_block_timestamp: burnchain_config.first_block_timestamp,
278✔
616
            pox_constants: burnchain_config.pox_constants.clone(),
278✔
617
            get_bulk_initial_lockups: Some(Box::new(move || {
278✔
618
                get_account_lockups(use_test_genesis_data)
278✔
619
            })),
278✔
620
            get_bulk_initial_balances: Some(Box::new(move || {
278✔
621
                get_account_balances(use_test_genesis_data)
278✔
622
            })),
278✔
623
            get_bulk_initial_namespaces: Some(Box::new(move || {
278✔
624
                get_namespaces(use_test_genesis_data)
278✔
625
            })),
278✔
626
            get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))),
278✔
627
        };
628

629
        info!("About to call open_and_exec");
278✔
630
        let (chain_state_db, receipts) = StacksChainState::open_and_exec(
278✔
631
            self.config.is_mainnet(),
278✔
632
            self.config.burnchain.chain_id,
278✔
633
            &self.config.get_chainstate_path_str(),
278✔
634
            Some(&mut boot_data),
278✔
635
            Some(self.config.node.get_marf_opts()),
278✔
636
        )
278✔
637
        .unwrap();
278✔
638
        run_loop::announce_boot_receipts(
278✔
639
            &mut self.event_dispatcher,
278✔
640
            &chain_state_db,
278✔
641
            &burnchain_config.pox_constants,
278✔
642
            &receipts,
278✔
643
        );
644
        chain_state_db
278✔
645
    }
278✔
646

647
    /// Instantiate the Stacks chain state and start the chains coordinator thread.
    /// Returns the coordinator thread handle.
    /// (NOTE: the doc previously claimed an atlas attachment channel is also
    /// returned; only the `JoinHandle` is.)
    fn spawn_chains_coordinator(
        &mut self,
        burnchain_config: &Burnchain,
        coordinator_receivers: CoordinatorReceivers,
        miner_status: Arc<Mutex<MinerStatus>>,
    ) -> JoinHandle<()> {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis Atlas attachments
        let mut atlas_config = AtlasConfig::new(self.config.is_mainnet());
        let genesis_attachments = GenesisData::new(use_test_genesis_data)
            .read_name_zonefiles()
            .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec()))
            .collect();
        atlas_config.genesis_attachments = Some(genesis_attachments);

        // boot the chainstate (also announces boot receipts to observers)
        let chain_state_db = self.boot_chainstate(burnchain_config);

        // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around
        let moved_atlas_config = self.config.atlas.clone();
        let moved_config = self.config.clone();
        let moved_burnchain_config = burnchain_config.clone();
        let coordinator_dispatcher = self.event_dispatcher.clone();
        let atlas_db = AtlasDB::connect(
            moved_atlas_config.clone(),
            &self.config.get_atlas_db_file_path(),
            true,
        )
        .expect("Failed to connect Atlas DB during startup");
        let coordinator_indexer =
            make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone()));

        // spawn the coordinator with a dedicated stack size
        // (BLOCK_PROCESSOR_STACK_SIZE); ChainsCoordinator::run blocks until
        // shutdown
        let coordinator_thread_handle = thread::Builder::new()
            .name(format!(
                "chains-coordinator-{}",
                &moved_config.node.rpc_bind
            ))
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
            .spawn(move || {
                debug!(
                    "chains-coordinator thread ID is {:?}",
                    thread::current().id()
                );
                let mut cost_estimator = moved_config.make_cost_estimator();
                let mut fee_estimator = moved_config.make_fee_estimator();

                let coord_config = ChainsCoordinatorConfig {
                    txindex: moved_config.node.txindex,
                };
                ChainsCoordinator::run(
                    coord_config,
                    chain_state_db,
                    moved_burnchain_config,
                    &coordinator_dispatcher,
                    coordinator_receivers,
                    moved_atlas_config,
                    cost_estimator.as_deref_mut(),
                    fee_estimator.as_deref_mut(),
                    miner_status,
                    coordinator_indexer,
                    atlas_db,
                );
            })
            .expect("FATAL: failed to start chains coordinator thread");

        coordinator_thread_handle
    }
277✔
717

718
    /// Instantiate the PoX watchdog
719
    fn instantiate_pox_watchdog(&mut self) {
277✔
720
        let pox_watchdog = PoxSyncWatchdog::new(&self.config, self.pox_watchdog_comms.clone())
277✔
721
            .expect("FATAL: failed to instantiate PoX sync watchdog");
277✔
722
        self.pox_watchdog = Some(pox_watchdog);
277✔
723
    }
277✔
724

725
    /// Start Prometheus logging
726
    fn start_prometheus(&mut self) {
277✔
727
        let Some(prometheus_bind) = self.config.node.prometheus_bind.clone() else {
277✔
728
            return;
269✔
729
        };
730
        let monitoring_thread = thread::Builder::new()
8✔
731
            .name("prometheus".to_string())
8✔
732
            .spawn(move || {
8✔
733
                debug!("prometheus thread ID is {:?}", thread::current().id());
8✔
734
                start_serving_monitoring_metrics(prometheus_bind)
8✔
735
            })
8✔
736
            .expect("FATAL: failed to start monitoring thread");
8✔
737

738
        self.monitoring_thread.replace(monitoring_thread);
8✔
739
    }
277✔
740

741
    /// Hand off ownership of the Prometheus monitoring thread's join handle,
    /// if one was started by `start_prometheus`.
    ///
    /// Returns `None` if metrics are disabled (no bind address configured) or
    /// if the handle was already taken by a previous call.
    pub fn take_monitoring_thread(&mut self) -> Option<JoinHandle<Result<(), MonitoringError>>> {
        self.monitoring_thread.take()
    }
238✔
744

745
    /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the
746
    /// highest sortition.
747
    /// Returns (height at rc start, sortition)
748
    fn get_reward_cycle_sortition_db_height(
277✔
749
        sortdb: &SortitionDB,
277✔
750
        burnchain_config: &Burnchain,
277✔
751
    ) -> (u64, BlockSnapshot) {
277✔
752
        let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())
277✔
753
            .expect("BUG: failed to load canonical stacks chain tip hash");
277✔
754

755
        let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch)
277✔
756
            .expect("BUG: failed to query sortition DB")
277✔
757
        {
758
            Some(sn) => sn,
277✔
759
            None => {
760
                debug!("No canonical stacks chain tip hash present");
×
761
                let sn = SortitionDB::get_first_block_snapshot(sortdb.conn())
×
762
                    .expect("BUG: failed to get first-ever block snapshot");
×
763
                sn
×
764
            }
765
        };
766

767
        (
277✔
768
            burnchain_config.reward_cycle_to_block_height(
277✔
769
                burnchain_config
277✔
770
                    .block_height_to_reward_cycle(sn.block_height)
277✔
771
                    .expect("BUG: snapshot preceeds first reward cycle"),
277✔
772
            ),
277✔
773
            sn,
277✔
774
        )
277✔
775
    }
277✔
776

777
    /// Starts the node runloop.
    ///
    /// This function will block by looping infinitely.
    /// It will start the burnchain (separate thread), set-up a channel in
    /// charge of coordinating the new blocks coming from the burnchain and
    /// the nodes, taking turns on tenures.
    ///
    /// Returns `Option<Neon2NakaData>` so that data can be passed to `NakamotoNode`
    /// on a clean shutdown; returns `None` if startup was aborted or the relayer
    /// hung up unexpectedly.
    pub fn start(
        &mut self,
        burnchain_opt: Option<Burnchain>,
        mut mine_start: u64,
    ) -> Option<Neon2NakaData> {
        // `take()` enforces that start() runs at most once per RunLoop.
        let (coordinator_receivers, coordinator_senders) = self
            .coordinator_channels
            .take()
            .expect("Run loop already started, can only start once after initialization.");

        Self::setup_termination_handler(self.should_keep_running.clone(), false);

        let burnchain_result = Self::instantiate_burnchain_state(
            &self.config,
            self.should_keep_running.clone(),
            burnchain_opt,
            coordinator_senders.clone(),
        );

        let mut burnchain = match burnchain_result {
            Ok(burnchain_controller) => burnchain_controller,
            Err(burnchain_error::ShutdownInitiated) => {
                // Operator requested shutdown during burnchain setup; not an error.
                info!("Exiting stacks-node");
                return None;
            }
            Err(e) => {
                error!("Error initializing burnchain: {e}");
                info!("Exiting stacks-node");
                return None;
            }
        };

        let burnchain_config = burnchain.get_burnchain();
        self.burnchain = Some(burnchain_config.clone());

        // can we mine?
        let is_miner = self.check_is_miner(&mut burnchain);
        self.is_miner = Some(is_miner);

        // relayer linkup
        let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER);

        // set up globals so other subsystems can instantiate off of the runloop state.
        let globals = Globals::new(
            coordinator_senders,
            self.get_miner_status(),
            relay_send,
            self.counters.clone(),
            self.pox_watchdog_comms.clone(),
            self.should_keep_running.clone(),
            mine_start,
            LeaderKeyRegistrationState::default(),
        );
        self.set_globals(globals.clone());

        // have headers; boot up the chains coordinator and instantiate the chain state
        let coordinator_thread_handle = self.spawn_chains_coordinator(
            &burnchain_config,
            coordinator_receivers,
            globals.get_miner_status(),
        );
        self.instantiate_pox_watchdog();
        self.start_prometheus();

        // We announce a new burn block so that the chains coordinator
        // can resume prior work and handle eventual unprocessed sortitions
        // stored during a previous session.
        globals.coord().announce_new_burn_block();

        // Make sure at least one sortition has happened, and make sure it's globally available
        let sortdb = burnchain.sortdb_mut();
        let (rc_aligned_height, sn) =
            RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config);

        let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height {
            // need at least one sortition to happen.
            burnchain
                .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1)
                .expect("Unable to get burnchain tip")
                .block_snapshot
        } else {
            sn
        };

        globals.set_last_sortition(burnchain_tip_snapshot);

        // Boot up the p2p network and relayer, and figure out how many sortitions we have so far
        // (it could be non-zero if the node is resuming from chainstate)
        let mut node = StacksNode::spawn(self, globals.clone(), relay_recv);

        // Wait for all pending sortitions to process
        let burnchain_db = burnchain_config
            .open_burnchain_db(true)
            .expect("FATAL: failed to open burnchain DB");
        let burnchain_db_tip = burnchain_db
            .get_canonical_chain_tip()
            .expect("FATAL: failed to query burnchain DB");
        let mut burnchain_tip = burnchain
            .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height)
            .expect("Unable to get burnchain tip");

        // Start the runloop
        debug!("Runloop: Begin run loop");
        self.counters.bump_blocks_processed();

        let mut sortition_db_height = rc_aligned_height;
        let mut burnchain_height = sortition_db_height;

        // prepare to fetch the first reward cycle!
        debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}");

        let mut last_tenure_sortition_height = 0;

        // Outer loop: one pass per PoX-watchdog-approved sync window. The loop's
        // break value is the function's return value.
        loop {
            if !globals.keep_running() {
                // The p2p thread relies on the same atomic_bool, it will
                // discontinue its execution after completing its ongoing runloop epoch.
                info!("Terminating p2p process");
                info!("Terminating relayer");
                info!("Terminating chains-coordinator");

                globals.coord().stop_chains_coordinator();
                coordinator_thread_handle.join().unwrap();
                let peer_network = node.join();

                // Data that will be passed to Nakamoto run loop
                // Only gets transferred on clean shutdown of neon run loop
                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                info!("Exiting stacks-node");
                break Some(data_to_naka);
            }

            // NOTE(review): headers height appears to be one past the remote tip,
            // hence the -1 — confirm against the burnchain controller's contract.
            let remote_chain_height = burnchain.get_headers_height() - 1;

            // wait until it's okay to process the next reward cycle's sortitions.
            let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait(
                &burnchain_config,
                &burnchain_tip,
                remote_chain_height,
            ) {
                Ok(x) => x,
                Err(e) => {
                    debug!("Runloop: PoX sync wait routine aborted: {e:?}");
                    continue;
                }
            };

            // calculate burnchain sync percentage (logging only)
            let percent: f64 = if remote_chain_height > 0 {
                burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64
            } else {
                0.0
            };

            // Download each burnchain block and process their sortitions.  This, in turn, will
            // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and
            // process them.  This loop runs for one reward cycle, so that the next pass of the
            // runloop will cause the PoX sync watchdog to wait until it believes that the node has
            // obtained all the Stacks blocks it can.
            debug!(
                "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})",
                burnchain_config
                    .block_height_to_reward_cycle(target_burnchain_block_height)
                    .expect("FATAL: target burnchain block height does not have a reward cycle");
                "total_burn_sync_percent" => %percent,
                "local_burn_height" => burnchain_tip.block_snapshot.block_height,
                "remote_tip_height" => remote_chain_height
            );

            // Inner loop: sync burnchain blocks and process their sortitions until
            // we reach the target height for this window (or shutdown is signaled).
            loop {
                if !globals.keep_running() {
                    break;
                }

                let (next_burnchain_tip, tip_burnchain_height) =
                    match burnchain.sync(Some(target_burnchain_block_height)) {
                        Ok(x) => x,
                        Err(e) => {
                            warn!("Runloop: Burnchain controller stopped: {e}");
                            continue;
                        }
                    };

                // *now* we know the burnchain height
                burnchain_tip = next_burnchain_tip;
                burnchain_height = tip_burnchain_height;

                let sortition_tip = &burnchain_tip.block_snapshot.sortition_id;
                let next_sortition_height = burnchain_tip.block_snapshot.block_height;

                if next_sortition_height != last_tenure_sortition_height {
                    info!(
                        "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}"
                    );
                }

                if next_sortition_height > sortition_db_height {
                    debug!(
                        "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}"
                    );

                    debug!("Runloop: block mining until we process all sortitions");
                    signal_mining_blocked(globals.get_miner_status());

                    // first, let's process all blocks in (sortition_db_height, next_sortition_height]
                    for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) {
                        // stop mining so we can advance the sortition DB and so our
                        // ProcessTenure() directive (sent by relayer_sortition_notify() below)
                        // will be unblocked.

                        let block = {
                            let ic = burnchain.sortdb_ref().index_conn();
                            SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip)
                                .unwrap()
                                .expect(
                                    "Failed to find block in fork processed by burnchain indexer",
                                )
                        };

                        let sortition_id = &block.sortition_id;

                        // Have the node process the new block, that can include, or not, a sortition.
                        node.process_burnchain_state(
                            self.config(),
                            burnchain.sortdb_mut(),
                            sortition_id,
                            ibd,
                        );

                        // Now, tell the relayer to check if it won a sortition during this block,
                        // and, if so, to process and advertize the block.  This is basically a
                        // no-op during boot-up.
                        //
                        // _this will block if the relayer's buffer is full_
                        if !node.relayer_sortition_notify() {
                            // First check if we were supposed to cleanly exit
                            if !globals.keep_running() {
                                // The p2p thread relies on the same atomic_bool, it will
                                // discontinue its execution after completing its ongoing runloop epoch.
                                info!("Terminating p2p process");
                                info!("Terminating relayer");
                                info!("Terminating chains-coordinator");

                                globals.coord().stop_chains_coordinator();
                                coordinator_thread_handle.join().unwrap();
                                let peer_network = node.join();

                                // Data that will be passed to Nakamoto run loop
                                // Only gets transferred on clean shutdown of neon run loop
                                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                                info!("Exiting stacks-node");
                                return Some(data_to_naka);
                            }
                            // relayer hung up, exit.
                            error!("Runloop: Block relayer and miner hung up, exiting.");
                            return None;
                        }
                    }

                    debug!("Runloop: enable miner after processing sortitions");
                    signal_mining_ready(globals.get_miner_status());

                    debug!(
                        "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})"
                    );

                    sortition_db_height = next_sortition_height;
                } else if ibd {
                    // drive block processing after we reach the burnchain tip.
                    // we may have downloaded all the blocks already,
                    // so we can't rely on the relayer alone to
                    // drive it.
                    globals.coord().announce_new_stacks_block();
                }

                if burnchain_height >= target_burnchain_block_height
                    || burnchain_height >= remote_chain_height
                {
                    break;
                }
            }

            // Fully caught up and not in initial block download: consider mining.
            if sortition_db_height >= burnchain_height && !ibd {
                let canonical_stacks_tip_height =
                    SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn())
                        .map(|snapshot| snapshot.canonical_stacks_tip_height)
                        .unwrap_or(0);
                if canonical_stacks_tip_height < mine_start {
                    info!(
                        "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip"
                    );
                } else {
                    // once we've synced to the chain tip once, don't apply this check again.
                    //  this prevents a possible corner case in the event of a PoX fork.
                    mine_start = 0;
                    globals.set_start_mining_height_if_zero(sortition_db_height);

                    // at tip, and not downloading. proceed to mine.
                    if last_tenure_sortition_height != sortition_db_height {
                        if is_miner {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks"
                            );
                        } else {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}."
                            );
                        }
                        last_tenure_sortition_height = sortition_db_height;
                    }

                    if !node.relayer_issue_tenure(ibd) {
                        // First check if we were supposed to cleanly exit
                        if !globals.keep_running() {
                            // The p2p thread relies on the same atomic_bool, it will
                            // discontinue its execution after completing its ongoing runloop epoch.
                            info!("Terminating p2p process");
                            info!("Terminating relayer");
                            info!("Terminating chains-coordinator");

                            globals.coord().stop_chains_coordinator();
                            coordinator_thread_handle.join().unwrap();
                            let peer_network = node.join();

                            // Data that will be passed to Nakamoto run loop
                            // Only gets transferred on clean shutdown of neon run loop
                            let data_to_naka = Neon2NakaData::new(globals, peer_network);

                            info!("Exiting stacks-node");
                            return Some(data_to_naka);
                        }
                        // relayer hung up, exit.
                        error!("Runloop: Block relayer and miner hung up, exiting.");
                        break None;
                    }
                }
            }
        }
    }
    }
279✔
1126
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc