• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 23483544962

24 Mar 2026 09:57AM UTC coverage: 85.685% (-0.02%) from 85.708%
23483544962

push

github

web-flow
Merge pull request #7029 from wileyj/chore/update_markdown

chore: Update link to page in SECURITY.md

186521 of 217682 relevant lines covered (85.69%)

17172874.4 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.34
/stacks-node/src/run_loop/neon.rs
1
#[cfg(test)]
2
use std::sync::atomic::AtomicU64;
3
use std::sync::atomic::{AtomicBool, Ordering};
4
use std::sync::mpsc::sync_channel;
5
use std::sync::{Arc, Mutex};
6
use std::thread;
7
use std::thread::JoinHandle;
8

9
use libc;
10
use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType};
11
use stacks::burnchains::{Burnchain, Error as burnchain_error};
12
use stacks::chainstate::burn::db::sortdb::SortitionDB;
13
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
14
use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers};
15
use stacks::chainstate::coordinator::{
16
    migrate_chainstate_dbs, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication,
17
    Error as coord_error,
18
};
19
use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
20
use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
21
use stacks::core::StacksEpochId;
22
use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment};
23
#[cfg(test)]
24
use stacks::util::tests::TestFlag;
25
use stacks::util_lib::db::Error as db_error;
26
use stacks_common::deps_common::ctrlc as termination;
27
use stacks_common::deps_common::ctrlc::SignalId;
28
use stacks_common::types::PublicKey;
29
use stacks_common::util::hash::Hash160;
30
use stx_genesis::GenesisData;
31

32
use super::RunLoopCallbacks;
33
use crate::burnchains::{make_bitcoin_indexer, Error};
34
use crate::globals::NeonGlobals as Globals;
35
use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError};
36
use crate::neon_node::{
37
    LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER,
38
};
39
use crate::node::{
40
    get_account_balances, get_account_lockups, get_names, get_namespaces,
41
    use_test_genesis_chainstate,
42
};
43
use crate::run_loop::boot_nakamoto::Neon2NakaData;
44
use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms};
45
use crate::{
46
    run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain,
47
};
48

49
/// File descriptor number for standard error; used by `async_safe_write_stderr`.
pub const STDERR: i32 = 2;
50

51
/// Test builds: a shared, clonable cell exposing an arbitrary value (e.g. the
/// last commit's parent tenure id) to the test framework.
#[cfg(test)]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub Arc<Mutex<T>>);

/// Production builds: a zero-sized placeholder, so the field costs nothing
/// outside of tests.
#[cfg(not(test))]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub std::marker::PhantomData<T>);
58

59
/// Test builds: a shared atomic counter used to observe run-loop progress.
#[cfg(test)]
#[derive(Clone)]
pub struct RunLoopCounter(pub Arc<AtomicU64>);

/// Production builds: a zero-sized placeholder; all counter operations
/// become no-ops (see `Counters::inc` / `Counters::set`).
#[cfg(not(test))]
#[derive(Clone)]
pub struct RunLoopCounter();
66

67
impl Default for RunLoopCounter {
    /// Test builds start the counter at zero.
    #[cfg(test)]
    fn default() -> Self {
        RunLoopCounter(Arc::new(AtomicU64::new(0)))
    }
    /// Production builds carry no state.
    #[cfg(not(test))]
    fn default() -> Self {
        Self()
    }
}
77

78
impl<T: Clone> RunLoopField<Option<T>> {
    /// Return a clone of the stored value (test builds only).
    ///
    /// # Panics
    /// Panics if the field has never been set (inner `Option` is `None`)
    /// or if the mutex is poisoned.
    #[cfg(test)]
    pub fn get(&self) -> T {
        self.0.lock().unwrap().clone().unwrap()
    }
}
84

85
impl RunLoopCounter {
    /// Read the current value with `SeqCst` ordering (test builds only).
    #[cfg(test)]
    pub fn get(&self) -> u64 {
        self.0.load(Ordering::SeqCst)
    }

    /// Read the current value with a caller-chosen memory ordering
    /// (test builds only).
    #[cfg(test)]
    pub fn load(&self, ordering: Ordering) -> u64 {
        self.0.load(ordering)
    }
}
96

97
/// Test builds: allow a `RunLoopCounter` to be used directly as its inner
/// `Arc<AtomicU64>` (e.g. `counter.fetch_add(1, ...)` in tests).
#[cfg(test)]
impl std::ops::Deref for RunLoopCounter {
    type Target = Arc<AtomicU64>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
105

106
/// Test-observable counters tracking run-loop progress.  In production builds
/// every `RunLoopCounter`/`RunLoopField` is zero-sized and the `bump_*`/`set_*`
/// methods in `impl Counters` are no-ops.
#[derive(Clone, Default)]
pub struct Counters {
    // Bumped by the corresponding `bump_*` methods as the run loop makes progress.
    pub blocks_processed: RunLoopCounter,
    pub microblocks_processed: RunLoopCounter,
    pub missed_tenures: RunLoopCounter,
    pub missed_microblock_tenures: RunLoopCounter,
    pub cancelled_commits: RunLoopCounter,

    pub sortitions_processed: RunLoopCounter,

    pub naka_submitted_vrfs: RunLoopCounter,
    /// the number of submitted commits
    pub neon_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub neon_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub naka_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_mined_blocks: RunLoopCounter,
    pub naka_rejected_blocks: RunLoopCounter,
    pub naka_proposed_blocks: RunLoopCounter,
    pub naka_mined_tenures: RunLoopCounter,
    pub naka_signer_pushed_blocks: RunLoopCounter,
    pub naka_miner_directives: RunLoopCounter,
    /// the stacks height reported with the last submitted commit
    pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
    /// the sats amount attached to the last submitted commit
    pub naka_submitted_commit_last_commit_amount: RunLoopCounter,
    /// the parent tenure id reported with the last submitted commit
    pub naka_submitted_commit_last_parent_tenure_id: RunLoopField<Option<ConsensusHash>>,

    pub naka_miner_current_rejections: RunLoopCounter,
    pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,

    // test-only override; presumably makes miners skip submitting the commit
    // op -- NOTE(review): confirm against the flag's readers
    #[cfg(test)]
    pub skip_commit_op: TestFlag<bool>,
}
140

141
impl Counters {
142
    pub fn new() -> Self {
1✔
143
        Self::default()
1✔
144
    }
1✔
145

146
    #[cfg(test)]
147
    fn inc(ctr: &RunLoopCounter) {
31,930✔
148
        ctr.0.fetch_add(1, Ordering::SeqCst);
31,930✔
149
    }
31,930✔
150

151
    #[cfg(not(test))]
152
    fn inc(_ctr: &RunLoopCounter) {}
153

154
    #[cfg(test)]
155
    fn set(ctr: &RunLoopCounter, value: u64) {
11,555✔
156
        ctr.0.store(value, Ordering::SeqCst);
11,555✔
157
    }
11,555✔
158

159
    #[cfg(not(test))]
160
    fn set(_ctr: &RunLoopCounter, _value: u64) {}
161

162
    #[cfg(test)]
163
    fn update<T: Clone>(ctr: &RunLoopField<Option<T>>, value: &T) {
1,576✔
164
        let mut mutex = ctr.0.lock().expect("FATAL: test counter mutext poisoned");
1,576✔
165
        let _ = mutex.replace(value.clone());
1,576✔
166
    }
1,576✔
167

168
    #[cfg(not(test))]
169
    fn update<T: Clone>(_ctr: &RunLoopField<Option<T>>, _value: &T) {}
170

171
    pub fn bump_blocks_processed(&self) {
11,470✔
172
        Counters::inc(&self.blocks_processed);
11,470✔
173
    }
11,470✔
174

175
    pub fn bump_sortitions_processed(&self) {
4,174✔
176
        Counters::inc(&self.sortitions_processed);
4,174✔
177
    }
4,174✔
178

179
    pub fn bump_microblocks_processed(&self) {
×
180
        Counters::inc(&self.microblocks_processed);
×
181
    }
×
182

183
    pub fn bump_missed_tenures(&self) {
1,035✔
184
        Counters::inc(&self.missed_tenures);
1,035✔
185
    }
1,035✔
186

187
    pub fn bump_missed_microblock_tenures(&self) {
×
188
        Counters::inc(&self.missed_microblock_tenures);
×
189
    }
×
190

191
    pub fn bump_cancelled_commits(&self) {
×
192
        Counters::inc(&self.cancelled_commits);
×
193
    }
×
194

195
    pub fn bump_neon_submitted_commits(&self, committed_burn_height: u64) {
6,491✔
196
        Counters::inc(&self.neon_submitted_commits);
6,491✔
197
        Counters::set(
6,491✔
198
            &self.neon_submitted_commit_last_burn_height,
6,491✔
199
            committed_burn_height,
6,491✔
200
        );
201
    }
6,491✔
202

203
    pub fn bump_naka_submitted_vrfs(&self) {
36✔
204
        Counters::inc(&self.naka_submitted_vrfs);
36✔
205
    }
36✔
206

207
    pub fn bump_naka_submitted_commits(
1,576✔
208
        &self,
1,576✔
209
        committed_burn_height: u64,
1,576✔
210
        committed_stacks_height: u64,
1,576✔
211
        committed_sats_amount: u64,
1,576✔
212
        committed_parent_tenure_id: &ConsensusHash,
1,576✔
213
    ) {
1,576✔
214
        Counters::inc(&self.naka_submitted_commits);
1,576✔
215
        Counters::set(
1,576✔
216
            &self.naka_submitted_commit_last_burn_height,
1,576✔
217
            committed_burn_height,
1,576✔
218
        );
219
        Counters::set(
1,576✔
220
            &self.naka_submitted_commit_last_stacks_tip,
1,576✔
221
            committed_stacks_height,
1,576✔
222
        );
223
        Counters::set(
1,576✔
224
            &self.naka_submitted_commit_last_commit_amount,
1,576✔
225
            committed_sats_amount,
1,576✔
226
        );
227
        Counters::update(
1,576✔
228
            &self.naka_submitted_commit_last_parent_tenure_id,
1,576✔
229
            committed_parent_tenure_id,
1,576✔
230
        );
231
    }
1,576✔
232

233
    pub fn bump_naka_mined_blocks(&self) {
2,171✔
234
        Counters::inc(&self.naka_mined_blocks);
2,171✔
235
    }
2,171✔
236

237
    pub fn bump_naka_proposed_blocks(&self) {
2,263✔
238
        Counters::inc(&self.naka_proposed_blocks);
2,263✔
239
    }
2,263✔
240

241
    pub fn bump_naka_rejected_blocks(&self) {
85✔
242
        Counters::inc(&self.naka_rejected_blocks);
85✔
243
    }
85✔
244

245
    pub fn bump_naka_signer_pushed_blocks(&self) {
15✔
246
        Counters::inc(&self.naka_signer_pushed_blocks);
15✔
247
    }
15✔
248

249
    pub fn bump_naka_mined_tenures(&self) {
1,211✔
250
        Counters::inc(&self.naka_mined_tenures);
1,211✔
251
    }
1,211✔
252

253
    pub fn bump_naka_miner_directives(&self) {
1,403✔
254
        Counters::inc(&self.naka_miner_directives);
1,403✔
255
    }
1,403✔
256

257
    pub fn set_microblocks_processed(&self, value: u64) {
100✔
258
        Counters::set(&self.microblocks_processed, value)
100✔
259
    }
100✔
260

261
    pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) {
118✔
262
        Counters::set(&self.naka_miner_current_rejections_timeout_secs, value)
118✔
263
    }
118✔
264

265
    pub fn set_miner_current_rejections(&self, value: u32) {
118✔
266
        Counters::set(&self.naka_miner_current_rejections, u64::from(value))
118✔
267
    }
118✔
268
}
269

270
/// Coordinating a node running in neon mode.
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    globals: Option<Globals>, // set via set_globals() once the node starts
    counters: Counters,
    coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>,
    should_keep_running: Arc<AtomicBool>, // cleared to false by the termination handler
    event_dispatcher: EventDispatcher,
    pox_watchdog: Option<PoxSyncWatchdog>, // can't be instantiated until .start() is called
    is_miner: Option<bool>,                // not known until .start() is called
    burnchain: Option<Burnchain>,          // not known until .start() is called
    pox_watchdog_comms: PoxSyncWatchdogComms,
    /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is
    /// instantiated (namely, so the test framework can access it).
    miner_status: Arc<Mutex<MinerStatus>>,
    monitoring_thread: Option<JoinHandle<Result<(), MonitoringError>>>, // prometheus thread, if spawned
}
288

289
/// Write to stderr in an async-safe manner.
/// See signal-safety(7)
///
/// Calls the raw `write(2)` syscall directly so this is safe to invoke from a
/// signal handler; any error or short write is deliberately ignored.
fn async_safe_write_stderr(msg: &str) {
    #[cfg(windows)]
    unsafe {
        // write(2) inexplicably has a different ABI only on Windows.
        libc::write(
            STDERR,
            msg.as_ptr() as *const libc::c_void,
            msg.len() as u32,
        );
    }
    #[cfg(not(windows))]
    unsafe {
        libc::write(STDERR, msg.as_ptr() as *const libc::c_void, msg.len());
    }
}
×
306

307
impl RunLoop {
308
    /// Sets up a runloop and node, given a config.
    ///
    /// Instantiates the coordinator channels, the shared shutdown flag, the
    /// PoX watchdog comms, and the miner status (seeded with the configured
    /// burn fee cap), then registers every configured event observer on a new
    /// event dispatcher.  The watchdog, burnchain, and miner-flag fields stay
    /// `None` until `.start()` runs.
    pub fn new(config: Config) -> Self {
        let channels = CoordinatorCommunication::instantiate();
        let should_keep_running = Arc::new(AtomicBool::new(true));
        let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone());
        let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(
            config.burnchain.burn_fee_cap,
        )));

        let mut event_dispatcher = EventDispatcher::new(config.get_working_dir());
        for observer in config.events_observers.iter() {
            event_dispatcher.register_observer(observer);
        }
        // process any event payloads still pending (presumably queued by a
        // prior run -- see EventDispatcher::process_pending_payloads)
        event_dispatcher.process_pending_payloads();

        Self {
            config,
            globals: None,
            coordinator_channels: Some(channels),
            callbacks: RunLoopCallbacks::new(),
            counters: Counters::default(),
            should_keep_running,
            event_dispatcher,
            pox_watchdog: None,
            is_miner: None,
            burnchain: None,
            pox_watchdog_comms,
            miner_status,
            monitoring_thread: None,
        }
    }
273✔
339

340
    /// Return a clone of the node's `Globals`.
    ///
    /// # Panics
    /// Panics if the globals have not been instantiated yet.
    pub fn get_globals(&self) -> Globals {
        match self.globals.as_ref() {
            Some(globals) => globals.clone(),
            None => panic!("FATAL: globals not instantiated"),
        }
    }
540✔
345

346
    /// Record the instantiated `Globals` on this run loop.
    fn set_globals(&mut self, globals: Globals) {
        let _prev = self.globals.replace(globals);
    }
270✔
349

350
    /// Clone the coordinator's sender-side channels, if still held.
    pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> {
        let (_receivers, senders) = self.coordinator_channels.as_ref()?;
        Some(senders.clone())
    }
271✔
353

354
    /// Clone the `blocks_processed` counter handle.
    pub fn get_blocks_processed_arc(&self) -> RunLoopCounter {
        let counter = &self.counters.blocks_processed;
        counter.clone()
    }
34✔
357

358
    /// Clone the `microblocks_processed` counter handle.
    pub fn get_microblocks_processed_arc(&self) -> RunLoopCounter {
        Clone::clone(&self.counters.microblocks_processed)
    }
×
361

362
    /// Clone the `missed_tenures` counter handle.
    pub fn get_missed_tenures_arc(&self) -> RunLoopCounter {
        let counter = &self.counters.missed_tenures;
        counter.clone()
    }
1✔
365

366
    /// Clone the `missed_microblock_tenures` counter handle.
    pub fn get_missed_microblock_tenures_arc(&self) -> RunLoopCounter {
        Clone::clone(&self.counters.missed_microblock_tenures)
    }
×
369

370
    /// Clone the `cancelled_commits` counter handle.
    pub fn get_cancelled_commits_arc(&self) -> RunLoopCounter {
        let counter = &self.counters.cancelled_commits;
        counter.clone()
    }
×
373

374
    /// Clone the full set of run-loop counters.
    pub fn get_counters(&self) -> Counters {
        let counters = &self.counters;
        counters.clone()
    }
527✔
377

378
    pub fn config(&self) -> &Config {
61,194✔
379
        &self.config
61,194✔
380
    }
61,194✔
381

382
    /// Clone the event dispatcher handle.
    pub fn get_event_dispatcher(&self) -> EventDispatcher {
        let dispatcher = &self.event_dispatcher;
        dispatcher.clone()
    }
540✔
385

386
    pub fn is_miner(&self) -> bool {
270✔
387
        self.is_miner.unwrap_or(false)
270✔
388
    }
270✔
389

390
    /// Clone the PoX sync watchdog comms handle.
    pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms {
        let comms = &self.pox_watchdog_comms;
        comms.clone()
    }
×
393

394
    pub fn get_termination_switch(&self) -> Arc<AtomicBool> {
716✔
395
        self.should_keep_running.clone()
716✔
396
    }
716✔
397

398
    /// Return a clone of the burnchain.
    ///
    /// # Panics
    /// Panics if called before `.start()` has set the burnchain.
    pub fn get_burnchain(&self) -> Burnchain {
        match self.burnchain.as_ref() {
            Some(burnchain) => burnchain.clone(),
            None => panic!("FATAL: tried to get runloop burnchain before calling .start()"),
        }
    }
1,080✔
403

404
    /// Mutably borrow the PoX sync watchdog.
    ///
    /// # Panics
    /// Panics if called before `.start()` has instantiated the watchdog.
    pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog {
        match self.pox_watchdog.as_mut() {
            Some(watchdog) => watchdog,
            None => panic!("FATAL: tried to get PoX watchdog before calling .start()"),
        }
    }
357,483✔
409

410
    /// Clone the shared miner status handle.
    pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> {
        Arc::clone(&self.miner_status)
    }
270✔
413

414
    /// Set up termination handler.  Have a signal set the `should_keep_running` atomic bool to
    /// false.  Panics if called more than once (outside of tests, unless `allow_err` is set).
    pub fn setup_termination_handler(keep_running_writer: Arc<AtomicBool>, allow_err: bool) {
        let install = termination::set_handler(move |sig_id| match sig_id {
            SignalId::Bus => {
                // SIGBUS: crash immediately and dump core rather than attempt
                // a graceful shutdown.
                let msg = "Caught SIGBUS; crashing immediately and dumping core\n";
                async_safe_write_stderr(msg);
                unsafe {
                    libc::abort();
                }
            }
            _ => {
                // Any other handled signal requests a graceful shutdown by
                // clearing the shared keep-running flag.
                let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n");
                async_safe_write_stderr(&msg);
                keep_running_writer.store(false, Ordering::SeqCst);
            }
        });

        if let Err(e) = install {
            // integration tests can do this
            if cfg!(test) || allow_err {
                info!("Error setting up signal handler, may have already been set");
            } else {
                panic!("FATAL: error setting termination handler - {e}");
            }
        }
    }
507✔
441

442
    /// Seconds to wait before retrying UTXO check during startup
    const UTXO_RETRY_INTERVAL: u64 = 10;
    /// Number of times to retry UTXO check during startup
    /// (total wait is up to `UTXO_RETRY_COUNT * UTXO_RETRY_INTERVAL` seconds)
    const UTXO_RETRY_COUNT: u64 = 6;
446

447
    /// Determine if we're the miner.
    /// If there's a network error, then assume that we're not a miner.
    ///
    /// Returns `true` if mining is configured and either mock-mining is
    /// enabled or a spendable UTXO is found for one of the miner's addresses.
    ///
    /// # Panics
    /// Panics if mining is configured but no UTXOs are found after
    /// `UTXO_RETRY_COUNT` attempts.
    fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool {
        if self.config.node.miner {
            // If we are mock mining, then we don't need to check for UTXOs and
            // we can just return true.
            if self.config.get_node_config(false).mock_mining {
                return true;
            }
            let keychain = Keychain::default(self.config.node.seed.clone());
            let mut op_signer = keychain.generate_op_signer();
            // best-effort: failure to create the wallet is logged, not fatal
            if let Err(e) = burnchain.create_wallet_if_dne() {
                warn!("Error when creating wallet: {e:?}");
            }
            // candidate addresses to scan for UTXOs, paired with the epoch
            // whose address format they use
            let mut btc_addrs = vec![(
                StacksEpochId::Epoch2_05,
                // legacy
                BitcoinAddress::from_bytes_legacy(
                    self.config.burnchain.get_bitcoin_network().1,
                    LegacyBitcoinAddressType::PublicKeyHash,
                    &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0,
                )
                .expect("FATAL: failed to construct legacy bitcoin address"),
            )];
            if self.config.miner.segwit {
                btc_addrs.push((
                    StacksEpochId::Epoch21,
                    // segwit p2wpkh
                    BitcoinAddress::from_bytes_segwit_p2wpkh(
                        self.config.burnchain.get_bitcoin_network().1,
                        &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0,
                    )
                    .expect("FATAL: failed to construct segwit p2wpkh address"),
                ));
            }

            // retry UTXO check a few times, in case bitcoind is still starting up
            for _ in 0..Self::UTXO_RETRY_COUNT {
                for (epoch_id, btc_addr) in &btc_addrs {
                    info!("Miner node: checking UTXOs at address: {btc_addr}");
                    let utxos =
                        burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0);
                    if utxos.is_none() {
                        warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)");
                    } else {
                        info!("UTXOs found - will run as a Miner node");
                        return true;
                    }
                }
                thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL));
            }
            // mining was requested but we cannot fund commits -- fail loudly
            panic!("No UTXOs found, exiting");
        } else {
            info!("Will run as a Follower node");
            false
        }
    }
270✔
504

505
    /// Instantiate the burnchain client and databases.
    /// Fetches headers and instantiates the burnchain.
    /// Panics on failure.
    ///
    /// Returns the started controller on success, or
    /// `burnchain_error::ShutdownInitiated` if a shutdown was requested while
    /// the burnchain was starting up.
    pub fn instantiate_burnchain_state(
        config: &Config,
        should_keep_running: Arc<AtomicBool>,
        burnchain_opt: Option<Burnchain>,
        coordinator_senders: CoordinatorChannels,
    ) -> Result<BitcoinRegtestController, burnchain_error> {
        // Initialize and start the burnchain.
        let mut burnchain_controller = BitcoinRegtestController::with_burnchain(
            config.clone(),
            Some(coordinator_senders),
            burnchain_opt,
            Some(should_keep_running.clone()),
        );

        let burnchain = burnchain_controller.get_burnchain();
        let epochs = burnchain_controller.get_stacks_epochs();

        // sanity check -- epoch data must be valid
        Config::assert_valid_epoch_settings(&burnchain, &epochs);

        // Upgrade chainstate databases if they exist already
        // NOTE: this has to be done before the subsequent call to
        // `burnchain_controller.connect_dbs()` below!
        match migrate_chainstate_dbs(
            &epochs,
            &burnchain,
            &config.get_burn_db_file_path(),
            &config.get_chainstate_path_str(),
            Some(config.node.get_marf_opts()),
        ) {
            Ok(_) => {}
            Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
                error!(
                    "FATAL: chainstate database(s) are not compatible with the current system epoch"
                );
                panic!();
            }
            Err(e) => {
                panic!("FATAL: unable to query filesystem or databases: {e:?}");
            }
        }

        info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while");

        // Decide how far to sync: up to the existing DB tip if one exists,
        // otherwise to the first burnchain block.
        let burnchain_config = burnchain_controller.get_burnchain();
        let target_burnchain_block_height = match burnchain_config
            .get_highest_burnchain_block()
            .expect("FATAL: failed to access burnchain database")
        {
            Some(burnchain_tip) => {
                // database exists already, and has blocks -- just sync to its tip.
                let target_height = burnchain_tip.block_height + 1;
                debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height);
                target_height
            }
            None => {
                // database does not exist yet
                let target_height = 1.max(burnchain_config.first_block_height + 1);
                debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}");
                target_height
            }
        };

        burnchain_controller
            .start(Some(target_burnchain_block_height))
            .map_err(|e| {
                // a closed coordinator during a requested shutdown is
                // expected; anything else is fatal
                if matches!(e, Error::CoordinatorClosed)
                    && !should_keep_running.load(Ordering::SeqCst)
                {
                    info!("Shutdown initiated during burnchain initialization: {e}");
                    return burnchain_error::ShutdownInitiated;
                }
                error!("Burnchain controller stopped: {e}");
                panic!();
            })?;

        // if the chainstate DBs don't exist, this will instantiate them
        if let Err(e) = burnchain_controller.connect_dbs() {
            error!("Failed to connect to burnchain databases: {e}");
            panic!();
        };

        // TODO (hack) instantiate the sortdb in the burnchain
        let _ = burnchain_controller.sortdb_mut();
        Ok(burnchain_controller)
    }
507✔
594

595
    /// Boot up the stacks chainstate.
    /// Instantiate the chainstate and push out the boot receipts to observers
    /// This is only public so we can test it.
    pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis balances
        let initial_balances = self
            .config
            .initial_balances
            .iter()
            .map(|e| (e.address.clone(), e.amount))
            .collect();

        // instantiate chainstate
        let mut boot_data = ChainStateBootData {
            initial_balances,
            post_flight_callback: None,
            first_burnchain_block_hash: burnchain_config.first_block_hash.clone(),
            first_burnchain_block_height: burnchain_config.first_block_height as u32,
            first_burnchain_block_timestamp: burnchain_config.first_block_timestamp,
            pox_constants: burnchain_config.pox_constants.clone(),
            // bulk genesis data is produced lazily through these callbacks
            get_bulk_initial_lockups: Some(Box::new(move || {
                get_account_lockups(use_test_genesis_data)
            })),
            get_bulk_initial_balances: Some(Box::new(move || {
                get_account_balances(use_test_genesis_data)
            })),
            get_bulk_initial_namespaces: Some(Box::new(move || {
                get_namespaces(use_test_genesis_data)
            })),
            get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))),
        };

        info!("About to call open_and_exec");
        let (chain_state_db, receipts) = StacksChainState::open_and_exec(
            self.config.is_mainnet(),
            self.config.burnchain.chain_id,
            &self.config.get_chainstate_path_str(),
            Some(&mut boot_data),
            Some(self.config.node.get_marf_opts()),
        )
        .unwrap();
        // push the boot receipts out to any registered event observers
        run_loop::announce_boot_receipts(
            &mut self.event_dispatcher,
            &chain_state_db,
            &burnchain_config.pox_constants,
            &receipts,
        );
        chain_state_db
    }
271✔
646

647
    /// Instantiate the Stacks chain state and start the chains coordinator thread.
    /// Returns the coordinator thread handle, and the receiving end of the coordinator's atlas
    /// attachment channel.
    fn spawn_chains_coordinator(
        &mut self,
        burnchain_config: &Burnchain,
        coordinator_receivers: CoordinatorReceivers,
        miner_status: Arc<Mutex<MinerStatus>>,
    ) -> JoinHandle<()> {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis Atlas attachments
        let mut atlas_config = AtlasConfig::new(self.config.is_mainnet());
        let genesis_attachments = GenesisData::new(use_test_genesis_data)
            .read_name_zonefiles()
            .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec()))
            .collect();
        atlas_config.genesis_attachments = Some(genesis_attachments);

        let chain_state_db = self.boot_chainstate(burnchain_config);

        // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around
        // clones below are moved into the coordinator thread's closure
        let moved_atlas_config = self.config.atlas.clone();
        let moved_config = self.config.clone();
        let moved_burnchain_config = burnchain_config.clone();
        let coordinator_dispatcher = self.event_dispatcher.clone();
        let atlas_db = AtlasDB::connect(
            moved_atlas_config.clone(),
            &self.config.get_atlas_db_file_path(),
            true,
        )
        .expect("Failed to connect Atlas DB during startup");
        let coordinator_indexer =
            make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone()));

        // run the coordinator on its own thread, with an enlarged stack for
        // block processing
        let coordinator_thread_handle = thread::Builder::new()
            .name(format!(
                "chains-coordinator-{}",
                &moved_config.node.rpc_bind
            ))
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
            .spawn(move || {
                debug!(
                    "chains-coordinator thread ID is {:?}",
                    thread::current().id()
                );
                let mut cost_estimator = moved_config.make_cost_estimator();
                let mut fee_estimator = moved_config.make_fee_estimator();

                let coord_config = ChainsCoordinatorConfig {
                    txindex: moved_config.node.txindex,
                };
                // hand this thread over to the chains coordinator
                ChainsCoordinator::run(
                    coord_config,
                    chain_state_db,
                    moved_burnchain_config,
                    &coordinator_dispatcher,
                    coordinator_receivers,
                    moved_atlas_config,
                    cost_estimator.as_deref_mut(),
                    fee_estimator.as_deref_mut(),
                    miner_status,
                    coordinator_indexer,
                    atlas_db,
                );
            })
            .expect("FATAL: failed to start chains coordinator thread");

        coordinator_thread_handle
    }
270✔
717

718
    /// Instantiate the PoX watchdog
719
    fn instantiate_pox_watchdog(&mut self) {
270✔
720
        let pox_watchdog = PoxSyncWatchdog::new(&self.config, self.pox_watchdog_comms.clone())
270✔
721
            .expect("FATAL: failed to instantiate PoX sync watchdog");
270✔
722
        self.pox_watchdog = Some(pox_watchdog);
270✔
723
    }
270✔
724

725
    /// Start Prometheus logging
726
    fn start_prometheus(&mut self) {
270✔
727
        let Some(prometheus_bind) = self.config.node.prometheus_bind.clone() else {
270✔
728
            return;
262✔
729
        };
730
        let monitoring_thread = thread::Builder::new()
8✔
731
            .name("prometheus".to_string())
8✔
732
            .spawn(move || {
8✔
733
                debug!("prometheus thread ID is {:?}", thread::current().id());
8✔
734
                start_serving_monitoring_metrics(prometheus_bind)
8✔
735
            })
8✔
736
            .expect("FATAL: failed to start monitoring thread");
8✔
737

738
        self.monitoring_thread.replace(monitoring_thread);
8✔
739
    }
270✔
740

741
    /// Transfer ownership of the Prometheus monitoring thread's join handle to
    /// the caller, leaving `None` in its place.
    ///
    /// Returns `None` if `start_prometheus()` never spawned a thread (no
    /// `prometheus_bind` configured) or if the handle was already taken.
    pub fn take_monitoring_thread(&mut self) -> Option<JoinHandle<Result<(), MonitoringError>>> {
        self.monitoring_thread.take()
    }
744

745
    /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the
746
    /// highest sortition.
747
    /// Returns (height at rc start, sortition)
748
    fn get_reward_cycle_sortition_db_height(
270✔
749
        sortdb: &SortitionDB,
270✔
750
        burnchain_config: &Burnchain,
270✔
751
    ) -> (u64, BlockSnapshot) {
270✔
752
        let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())
270✔
753
            .expect("BUG: failed to load canonical stacks chain tip hash");
270✔
754

755
        let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch)
270✔
756
            .expect("BUG: failed to query sortition DB")
270✔
757
        {
758
            Some(sn) => sn,
270✔
759
            None => {
760
                debug!("No canonical stacks chain tip hash present");
×
761
                let sn = SortitionDB::get_first_block_snapshot(sortdb.conn())
×
762
                    .expect("BUG: failed to get first-ever block snapshot");
×
763
                sn
×
764
            }
765
        };
766

767
        (
270✔
768
            burnchain_config.reward_cycle_to_block_height(
270✔
769
                burnchain_config
270✔
770
                    .block_height_to_reward_cycle(sn.block_height)
270✔
771
                    .expect("BUG: snapshot preceeds first reward cycle"),
270✔
772
            ),
270✔
773
            sn,
270✔
774
        )
270✔
775
    }
270✔
776

777
    /// Starts the node runloop.
    ///
    /// This function will block by looping infinitely.
    /// It will start the burnchain (separate thread), set-up a channel in
    /// charge of coordinating the new blocks coming from the burnchain and
    /// the nodes, taking turns on tenures.
    ///
    /// # Arguments
    /// * `burnchain_opt` - optional pre-built burnchain to use instead of
    ///   instantiating one from `self.config`
    /// * `mine_start` - Stacks chain height the node must reach before it
    ///   begins mining (checked only until first satisfied)
    ///
    /// Returns `Some(Neon2NakaData)` on a clean shutdown so state (globals,
    /// peer network) can be handed to the Nakamoto run loop; returns `None`
    /// when burnchain initialization fails or the relayer channel hangs up.
    pub fn start(
        &mut self,
        burnchain_opt: Option<Burnchain>,
        mut mine_start: u64,
    ) -> Option<Neon2NakaData> {
        // Channels may only be taken once; a second call to start() panics here.
        let (coordinator_receivers, coordinator_senders) = self
            .coordinator_channels
            .take()
            .expect("Run loop already started, can only start once after initialization.");

        Self::setup_termination_handler(self.should_keep_running.clone(), false);

        let burnchain_result = Self::instantiate_burnchain_state(
            &self.config,
            self.should_keep_running.clone(),
            burnchain_opt,
            coordinator_senders.clone(),
        );

        let mut burnchain = match burnchain_result {
            Ok(burnchain_controller) => burnchain_controller,
            Err(burnchain_error::ShutdownInitiated) => {
                // User-requested shutdown during burnchain bootstrap: not an error.
                info!("Exiting stacks-node");
                return None;
            }
            Err(e) => {
                error!("Error initializing burnchain: {e}");
                info!("Exiting stacks-node");
                return None;
            }
        };

        let burnchain_config = burnchain.get_burnchain();
        self.burnchain = Some(burnchain_config.clone());

        // can we mine?
        let is_miner = self.check_is_miner(&mut burnchain);
        self.is_miner = Some(is_miner);

        // relayer linkup
        let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER);

        // set up globals so other subsystems can instantiate off of the runloop state.
        let globals = Globals::new(
            coordinator_senders,
            self.get_miner_status(),
            relay_send,
            self.counters.clone(),
            self.pox_watchdog_comms.clone(),
            self.should_keep_running.clone(),
            mine_start,
            LeaderKeyRegistrationState::default(),
        );
        self.set_globals(globals.clone());

        // have headers; boot up the chains coordinator and instantiate the chain state
        let coordinator_thread_handle = self.spawn_chains_coordinator(
            &burnchain_config,
            coordinator_receivers,
            globals.get_miner_status(),
        );
        self.instantiate_pox_watchdog();
        self.start_prometheus();

        // We announce a new burn block so that the chains coordinator
        // can resume prior work and handle eventual unprocessed sortitions
        // stored during a previous session.
        globals.coord().announce_new_burn_block();

        // Make sure at least one sortition has happened, and make sure it's globally available
        let sortdb = burnchain.sortdb_mut();
        let (rc_aligned_height, sn) =
            RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config);

        let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height {
            // need at least one sortition to happen.
            burnchain
                .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1)
                .expect("Unable to get burnchain tip")
                .block_snapshot
        } else {
            sn
        };

        globals.set_last_sortition(burnchain_tip_snapshot);

        // Boot up the p2p network and relayer, and figure out how many sortitions we have so far
        // (it could be non-zero if the node is resuming from chainstate)
        let mut node = StacksNode::spawn(self, globals.clone(), relay_recv);

        // Wait for all pending sortitions to process
        let burnchain_db = burnchain_config
            .open_burnchain_db(true)
            .expect("FATAL: failed to open burnchain DB");
        let burnchain_db_tip = burnchain_db
            .get_canonical_chain_tip()
            .expect("FATAL: failed to query burnchain DB");
        let mut burnchain_tip = burnchain
            .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height)
            .expect("Unable to get burnchain tip");

        // Start the runloop
        debug!("Runloop: Begin run loop");
        self.counters.bump_blocks_processed();

        let mut sortition_db_height = rc_aligned_height;
        let mut burnchain_height = sortition_db_height;

        // prepare to fetch the first reward cycle!
        debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}");

        let mut last_tenure_sortition_height = 0;

        // Main run loop: each outer iteration waits on the PoX watchdog, syncs
        // one reward cycle's worth of burnchain blocks (inner loop), then
        // drives mining once caught up. NOTE(review): the clean-shutdown
        // sequence below is repeated verbatim on three exit paths; it cannot be
        // trivially extracted because it consumes `globals`, `node`, and
        // `coordinator_thread_handle`.
        loop {
            if !globals.keep_running() {
                // The p2p thread relies on the same atomic_bool, it will
                // discontinue its execution after completing its ongoing runloop epoch.
                info!("Terminating p2p process");
                info!("Terminating relayer");
                info!("Terminating chains-coordinator");

                globals.coord().stop_chains_coordinator();
                coordinator_thread_handle.join().unwrap();
                let peer_network = node.join();

                // Data that will be passed to Nakamoto run loop
                // Only gets transferred on clean shutdown of neon run loop
                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                info!("Exiting stacks-node");
                break Some(data_to_naka);
            }

            let remote_chain_height = burnchain.get_headers_height() - 1;

            // wait until it's okay to process the next reward cycle's sortitions.
            let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait(
                &burnchain_config,
                &burnchain_tip,
                remote_chain_height,
            ) {
                Ok(x) => x,
                Err(e) => {
                    debug!("Runloop: PoX sync wait routine aborted: {e:?}");
                    continue;
                }
            };

            // calculate burnchain sync percentage
            let percent: f64 = if remote_chain_height > 0 {
                burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64
            } else {
                0.0
            };

            // Download each burnchain block and process their sortitions.  This, in turn, will
            // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and
            // process them.  This loop runs for one reward cycle, so that the next pass of the
            // runloop will cause the PoX sync watchdog to wait until it believes that the node has
            // obtained all the Stacks blocks it can.
            debug!(
                "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})",
                burnchain_config
                    .block_height_to_reward_cycle(target_burnchain_block_height)
                    .expect("FATAL: target burnchain block height does not have a reward cycle");
                "total_burn_sync_percent" => %percent,
                "local_burn_height" => burnchain_tip.block_snapshot.block_height,
                "remote_tip_height" => remote_chain_height
            );

            // Inner loop: sync burnchain blocks up to the watchdog's target
            // height, processing each new sortition as it lands.
            loop {
                if !globals.keep_running() {
                    // Defer the full shutdown sequence to the outer loop's check.
                    break;
                }

                let (next_burnchain_tip, tip_burnchain_height) =
                    match burnchain.sync(Some(target_burnchain_block_height)) {
                        Ok(x) => x,
                        Err(e) => {
                            // Transient controller failure: retry the sync.
                            warn!("Runloop: Burnchain controller stopped: {e}");
                            continue;
                        }
                    };

                // *now* we know the burnchain height
                burnchain_tip = next_burnchain_tip;
                burnchain_height = tip_burnchain_height;

                let sortition_tip = &burnchain_tip.block_snapshot.sortition_id;
                let next_sortition_height = burnchain_tip.block_snapshot.block_height;

                if next_sortition_height != last_tenure_sortition_height {
                    info!(
                        "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}"
                    );
                }

                if next_sortition_height > sortition_db_height {
                    debug!(
                        "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}"
                    );

                    debug!("Runloop: block mining until we process all sortitions");
                    signal_mining_blocked(globals.get_miner_status());

                    // first, let's process all blocks in (sortition_db_height, next_sortition_height]
                    for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) {
                        // stop mining so we can advance the sortition DB and so our
                        // ProcessTenure() directive (sent by relayer_sortition_notify() below)
                        // will be unblocked.

                        let block = {
                            let ic = burnchain.sortdb_ref().index_conn();
                            SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip)
                                .unwrap()
                                .expect(
                                    "Failed to find block in fork processed by burnchain indexer",
                                )
                        };

                        let sortition_id = &block.sortition_id;

                        // Have the node process the new block, that can include, or not, a sortition.
                        node.process_burnchain_state(
                            self.config(),
                            burnchain.sortdb_mut(),
                            sortition_id,
                            ibd,
                        );

                        // Now, tell the relayer to check if it won a sortition during this block,
                        // and, if so, to process and advertize the block.  This is basically a
                        // no-op during boot-up.
                        //
                        // _this will block if the relayer's buffer is full_
                        if !node.relayer_sortition_notify() {
                            // First check if we were supposed to cleanly exit
                            if !globals.keep_running() {
                                // The p2p thread relies on the same atomic_bool, it will
                                // discontinue its execution after completing its ongoing runloop epoch.
                                info!("Terminating p2p process");
                                info!("Terminating relayer");
                                info!("Terminating chains-coordinator");

                                globals.coord().stop_chains_coordinator();
                                coordinator_thread_handle.join().unwrap();
                                let peer_network = node.join();

                                // Data that will be passed to Nakamoto run loop
                                // Only gets transferred on clean shutdown of neon run loop
                                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                                info!("Exiting stacks-node");
                                return Some(data_to_naka);
                            }
                            // relayer hung up, exit.
                            error!("Runloop: Block relayer and miner hung up, exiting.");
                            return None;
                        }
                    }

                    debug!("Runloop: enable miner after processing sortitions");
                    signal_mining_ready(globals.get_miner_status());

                    debug!(
                        "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})"
                    );

                    sortition_db_height = next_sortition_height;
                } else if ibd {
                    // drive block processing after we reach the burnchain tip.
                    // we may have downloaded all the blocks already,
                    // so we can't rely on the relayer alone to
                    // drive it.
                    globals.coord().announce_new_stacks_block();
                }

                if burnchain_height >= target_burnchain_block_height
                    || burnchain_height >= remote_chain_height
                {
                    break;
                }
            }

            // Fully caught up and not in initial block download: time to mine.
            if sortition_db_height >= burnchain_height && !ibd {
                let canonical_stacks_tip_height =
                    SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn())
                        .map(|snapshot| snapshot.canonical_stacks_tip_height)
                        .unwrap_or(0);
                if canonical_stacks_tip_height < mine_start {
                    info!(
                        "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip"
                    );
                } else {
                    // once we've synced to the chain tip once, don't apply this check again.
                    //  this prevents a possible corner case in the event of a PoX fork.
                    mine_start = 0;
                    globals.set_start_mining_height_if_zero(sortition_db_height);

                    // at tip, and not downloading. proceed to mine.
                    if last_tenure_sortition_height != sortition_db_height {
                        if is_miner {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks"
                            );
                        } else {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}."
                            );
                        }
                        last_tenure_sortition_height = sortition_db_height;
                    }

                    if !node.relayer_issue_tenure(ibd) {
                        // First check if we were supposed to cleanly exit
                        if !globals.keep_running() {
                            // The p2p thread relies on the same atomic_bool, it will
                            // discontinue its execution after completing its ongoing runloop epoch.
                            info!("Terminating p2p process");
                            info!("Terminating relayer");
                            info!("Terminating chains-coordinator");

                            globals.coord().stop_chains_coordinator();
                            coordinator_thread_handle.join().unwrap();
                            let peer_network = node.join();

                            // Data that will be passed to Nakamoto run loop
                            // Only gets transferred on clean shutdown of neon run loop
                            let data_to_naka = Neon2NakaData::new(globals, peer_network);

                            info!("Exiting stacks-node");
                            return Some(data_to_naka);
                        }
                        // relayer hung up, exit.
                        error!("Runloop: Block relayer and miner hung up, exiting.");
                        break None;
                    }
                }
            }
        }
    }
1126
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc