• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 25864253919-1

14 May 2026 01:59PM UTC coverage: 85.775% (+0.06%) from 85.712%
25864253919-1

Pull #7060

github

2190ea
web-flow
Merge 688987f1a into 31276d071
Pull Request #7060: feat: marf squash engine

2535 of 2791 new or added lines in 14 files covered. (90.83%)

5293 existing lines in 102 files now uncovered.

190751 of 222385 relevant lines covered (85.78%)

18719152.56 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.83
/stacks-node/src/run_loop/neon.rs
1
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
2
// Copyright (C) 2020-2026 Stacks Open Internet Foundation
3
//
4
// This program is free software: you can redistribute it and/or modify
5
// it under the terms of the GNU General Public License as published by
6
// the Free Software Foundation, either version 3 of the License, or
7
// (at your option) any later version.
8
//
9
// This program is distributed in the hope that it will be useful,
10
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
// GNU General Public License for more details.
13
//
14
// You should have received a copy of the GNU General Public License
15
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
16

17
#[cfg(test)]
18
use std::sync::atomic::AtomicU64;
19
use std::sync::atomic::{AtomicBool, Ordering};
20
use std::sync::mpsc::sync_channel;
21
use std::sync::{Arc, Mutex};
22
use std::thread;
23
use std::thread::JoinHandle;
24

25
use libc;
26
use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType};
27
use stacks::burnchains::{Burnchain, Error as burnchain_error};
28
use stacks::chainstate::burn::db::sortdb::SortitionDB;
29
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
30
use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers};
31
use stacks::chainstate::coordinator::{
32
    migrate_chainstate_dbs, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication,
33
    Error as coord_error,
34
};
35
use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
36
use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
37
use stacks::core::StacksEpochId;
38
use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment};
39
#[cfg(test)]
40
use stacks::util::tests::TestFlag;
41
use stacks::util_lib::db::Error as db_error;
42
use stacks_common::deps_common::ctrlc as termination;
43
use stacks_common::deps_common::ctrlc::SignalId;
44
use stacks_common::types::PublicKey;
45
use stacks_common::util::hash::Hash160;
46
use stx_genesis::GenesisData;
47

48
use super::RunLoopCallbacks;
49
use crate::burnchains::{make_bitcoin_indexer, Error};
50
use crate::globals::NeonGlobals as Globals;
51
use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError};
52
use crate::neon_node::{
53
    LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER,
54
};
55
use crate::node::{
56
    get_account_balances, get_account_lockups, get_names, get_namespaces,
57
    use_test_genesis_chainstate,
58
};
59
use crate::run_loop::boot_nakamoto::Neon2NakaData;
60
use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms};
61
use crate::{
62
    run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain,
63
};
64

65
/// POSIX file descriptor number for stderr; used by `async_safe_write_stderr`
/// so signal handlers can write diagnostics via raw `write(2)`.
pub const STDERR: i32 = 2;
66

67
/// Test-only observable field: in test builds, wraps a value in
/// `Arc<Mutex<_>>` so the run loop can publish values that tests can read.
#[cfg(test)]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub Arc<Mutex<T>>);

/// Zero-cost stand-in for `RunLoopField` in non-test builds; carries no
/// state (`PhantomData` only expresses the type parameter).
#[cfg(not(test))]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub std::marker::PhantomData<T>);
74

75
/// Test-only shared counter: in test builds, an `Arc<AtomicU64>` that the
/// run loop increments and tests poll.
#[cfg(test)]
#[derive(Clone)]
pub struct RunLoopCounter(pub Arc<AtomicU64>);

/// Zero-cost stand-in for `RunLoopCounter` in non-test builds; all counter
/// operations compile to no-ops (see `Counters::inc`/`Counters::set`).
#[cfg(not(test))]
#[derive(Clone)]
pub struct RunLoopCounter();
82

83
impl Default for RunLoopCounter {
84
    #[cfg(test)]
85
    fn default() -> Self {
6,027✔
86
        RunLoopCounter(Arc::new(AtomicU64::new(0)))
6,027✔
87
    }
6,027✔
88
    #[cfg(not(test))]
89
    fn default() -> Self {
90
        Self()
91
    }
92
}
93

94
impl<T: Clone> RunLoopField<Option<T>> {
95
    #[cfg(test)]
96
    pub fn get(&self) -> T {
1✔
97
        self.0.lock().unwrap().clone().unwrap()
1✔
98
    }
1✔
99
}
100

101
impl RunLoopCounter {
102
    #[cfg(test)]
103
    pub fn get(&self) -> u64 {
3,332✔
104
        self.0.load(Ordering::SeqCst)
3,332✔
105
    }
3,332✔
106

107
    #[cfg(test)]
108
    pub fn load(&self, ordering: Ordering) -> u64 {
64,818✔
109
        self.0.load(ordering)
64,818✔
110
    }
64,818✔
111
}
112

113
/// Test-only convenience: let a `RunLoopCounter` be used directly as its
/// inner `Arc<AtomicU64>` (e.g. `counter.fetch_add(..)` in test code).
#[cfg(test)]
impl std::ops::Deref for RunLoopCounter {
    type Target = Arc<AtomicU64>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
121

122
/// Instrumentation counters shared between the run loop and test code.
/// Cloning is cheap: in test builds the fields are `Arc`-backed shared
/// state; in production builds they are zero-sized no-ops.
#[derive(Clone, Default)]
pub struct Counters {
    /// bumped by `bump_blocks_processed`
    pub blocks_processed: RunLoopCounter,
    /// bumped by `bump_microblocks_processed`; overwritten by `set_microblocks_processed`
    pub microblocks_processed: RunLoopCounter,
    /// bumped by `bump_missed_tenures`
    pub missed_tenures: RunLoopCounter,
    /// bumped by `bump_missed_microblock_tenures`
    pub missed_microblock_tenures: RunLoopCounter,
    /// bumped by `bump_cancelled_commits`
    pub cancelled_commits: RunLoopCounter,

    /// bumped by `bump_sortitions_processed`
    pub sortitions_processed: RunLoopCounter,

    /// bumped by `bump_naka_submitted_vrfs`
    pub naka_submitted_vrfs: RunLoopCounter,
    /// the number of submitted commits
    pub neon_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub neon_submitted_commit_last_burn_height: RunLoopCounter,
    /// bumped by `bump_naka_submitted_commits`
    pub naka_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub naka_submitted_commit_last_burn_height: RunLoopCounter,
    /// bumped by `bump_naka_mined_blocks`
    pub naka_mined_blocks: RunLoopCounter,
    /// bumped by `bump_naka_rejected_blocks`
    pub naka_rejected_blocks: RunLoopCounter,
    /// bumped by `bump_naka_proposed_blocks`
    pub naka_proposed_blocks: RunLoopCounter,
    /// bumped by `bump_naka_mined_tenures`
    pub naka_mined_tenures: RunLoopCounter,
    /// bumped by `bump_naka_signer_pushed_blocks`
    pub naka_signer_pushed_blocks: RunLoopCounter,
    /// bumped by `bump_naka_miner_directives`
    pub naka_miner_directives: RunLoopCounter,
    /// stacks height recorded by the last `bump_naka_submitted_commits`
    pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
    /// sats amount recorded by the last `bump_naka_submitted_commits`
    pub naka_submitted_commit_last_commit_amount: RunLoopCounter,
    /// parent tenure id recorded by the last `bump_naka_submitted_commits`
    pub naka_submitted_commit_last_parent_tenure_id: RunLoopField<Option<ConsensusHash>>,

    /// set via `set_miner_current_rejections`
    pub naka_miner_current_rejections: RunLoopCounter,
    /// set via `set_miner_current_rejections_timeout_secs`
    pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,

    /// test-only flag; presumably instructs the miner to skip submitting
    /// block-commit ops — confirm against miner code (not visible here)
    #[cfg(test)]
    pub skip_commit_op: TestFlag<bool>,
}
156

157
impl Counters {
158
    pub fn new() -> Self {
1✔
159
        Self::default()
1✔
160
    }
1✔
161

162
    #[cfg(test)]
163
    fn inc(ctr: &RunLoopCounter) {
33,391✔
164
        ctr.0.fetch_add(1, Ordering::SeqCst);
33,391✔
165
    }
33,391✔
166

167
    #[cfg(not(test))]
168
    fn inc(_ctr: &RunLoopCounter) {}
169

170
    #[cfg(test)]
171
    fn set(ctr: &RunLoopCounter, value: u64) {
12,203✔
172
        ctr.0.store(value, Ordering::SeqCst);
12,203✔
173
    }
12,203✔
174

175
    #[cfg(not(test))]
176
    fn set(_ctr: &RunLoopCounter, _value: u64) {}
177

178
    #[cfg(test)]
179
    fn update<T: Clone>(ctr: &RunLoopField<Option<T>>, value: &T) {
1,696✔
180
        let mut mutex = ctr.0.lock().expect("FATAL: test counter mutext poisoned");
1,696✔
181
        let _ = mutex.replace(value.clone());
1,696✔
182
    }
1,696✔
183

184
    #[cfg(not(test))]
185
    fn update<T: Clone>(_ctr: &RunLoopField<Option<T>>, _value: &T) {}
186

187
    pub fn bump_blocks_processed(&self) {
11,948✔
188
        Counters::inc(&self.blocks_processed);
11,948✔
189
    }
11,948✔
190

191
    pub fn bump_sortitions_processed(&self) {
4,363✔
192
        Counters::inc(&self.sortitions_processed);
4,363✔
193
    }
4,363✔
194

UNCOV
195
    pub fn bump_microblocks_processed(&self) {
×
UNCOV
196
        Counters::inc(&self.microblocks_processed);
×
UNCOV
197
    }
×
198

199
    pub fn bump_missed_tenures(&self) {
1,141✔
200
        Counters::inc(&self.missed_tenures);
1,141✔
201
    }
1,141✔
202

UNCOV
203
    pub fn bump_missed_microblock_tenures(&self) {
×
UNCOV
204
        Counters::inc(&self.missed_microblock_tenures);
×
UNCOV
205
    }
×
206

UNCOV
207
    pub fn bump_cancelled_commits(&self) {
×
UNCOV
208
        Counters::inc(&self.cancelled_commits);
×
UNCOV
209
    }
×
210

211
    pub fn bump_neon_submitted_commits(&self, committed_burn_height: u64) {
6,750✔
212
        Counters::inc(&self.neon_submitted_commits);
6,750✔
213
        Counters::set(
6,750✔
214
            &self.neon_submitted_commit_last_burn_height,
6,750✔
215
            committed_burn_height,
6,750✔
216
        );
217
    }
6,750✔
218

219
    pub fn bump_naka_submitted_vrfs(&self) {
37✔
220
        Counters::inc(&self.naka_submitted_vrfs);
37✔
221
    }
37✔
222

223
    pub fn bump_naka_submitted_commits(
1,696✔
224
        &self,
1,696✔
225
        committed_burn_height: u64,
1,696✔
226
        committed_stacks_height: u64,
1,696✔
227
        committed_sats_amount: u64,
1,696✔
228
        committed_parent_tenure_id: &ConsensusHash,
1,696✔
229
    ) {
1,696✔
230
        Counters::inc(&self.naka_submitted_commits);
1,696✔
231
        Counters::set(
1,696✔
232
            &self.naka_submitted_commit_last_burn_height,
1,696✔
233
            committed_burn_height,
1,696✔
234
        );
235
        Counters::set(
1,696✔
236
            &self.naka_submitted_commit_last_stacks_tip,
1,696✔
237
            committed_stacks_height,
1,696✔
238
        );
239
        Counters::set(
1,696✔
240
            &self.naka_submitted_commit_last_commit_amount,
1,696✔
241
            committed_sats_amount,
1,696✔
242
        );
243
        Counters::update(
1,696✔
244
            &self.naka_submitted_commit_last_parent_tenure_id,
1,696✔
245
            committed_parent_tenure_id,
1,696✔
246
        );
247
    }
1,696✔
248

249
    pub fn bump_naka_mined_blocks(&self) {
2,234✔
250
        Counters::inc(&self.naka_mined_blocks);
2,234✔
251
    }
2,234✔
252

253
    pub fn bump_naka_proposed_blocks(&self) {
2,318✔
254
        Counters::inc(&self.naka_proposed_blocks);
2,318✔
255
    }
2,318✔
256

257
    pub fn bump_naka_rejected_blocks(&self) {
85✔
258
        Counters::inc(&self.naka_rejected_blocks);
85✔
259
    }
85✔
260

261
    pub fn bump_naka_signer_pushed_blocks(&self) {
14✔
262
        Counters::inc(&self.naka_signer_pushed_blocks);
14✔
263
    }
14✔
264

265
    pub fn bump_naka_mined_tenures(&self) {
1,297✔
266
        Counters::inc(&self.naka_mined_tenures);
1,297✔
267
    }
1,297✔
268

269
    pub fn bump_naka_miner_directives(&self) {
1,508✔
270
        Counters::inc(&self.naka_miner_directives);
1,508✔
271
    }
1,508✔
272

273
    pub fn set_microblocks_processed(&self, value: u64) {
107✔
274
        Counters::set(&self.microblocks_processed, value)
107✔
275
    }
107✔
276

277
    pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) {
129✔
278
        Counters::set(&self.naka_miner_current_rejections_timeout_secs, value)
129✔
279
    }
129✔
280

281
    pub fn set_miner_current_rejections(&self, value: u32) {
129✔
282
        Counters::set(&self.naka_miner_current_rejections, u64::from(value))
129✔
283
    }
129✔
284
}
285

286
/// Coordinating a node running in neon mode.
pub struct RunLoop {
    /// node configuration, fixed at construction
    config: Config,
    pub callbacks: RunLoopCallbacks,
    /// inter-thread shared state; `None` until `set_globals` is called
    globals: Option<Globals>,
    /// test instrumentation counters (zero-sized in production builds)
    counters: Counters,
    /// coordinator channel pair created in `new`; the sender half is handed
    /// out via `get_coordinator_channel`
    coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>,
    /// cleared by the termination signal handler to request shutdown
    should_keep_running: Arc<AtomicBool>,
    event_dispatcher: EventDispatcher,
    pox_watchdog: Option<PoxSyncWatchdog>, // can't be instantiated until .start() is called
    is_miner: Option<bool>,                // not known until .start() is called
    burnchain: Option<Burnchain>,          // not known until .start() is called
    pox_watchdog_comms: PoxSyncWatchdogComms,
    /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is
    /// instantiated (namely, so the test framework can access it).
    miner_status: Arc<Mutex<MinerStatus>>,
    /// handle to the Prometheus metrics thread, if `start_prometheus` spawned one
    monitoring_thread: Option<JoinHandle<Result<(), MonitoringError>>>,
}
304

305
/// Write to stderr in an async-safe manner.
/// See signal-safety(7).
///
/// Uses the raw `write(2)` syscall directly (no allocation, no locking, no
/// buffered I/O) so it is safe to call from inside a signal handler.  The
/// return value of `write` is deliberately ignored: there is nothing useful
/// to do about a failed write from a signal handler.
fn async_safe_write_stderr(msg: &str) {
    #[cfg(windows)]
    unsafe {
        // write(2) inexplicably has a different ABI only on Windows.
        // NOTE(review): `msg.len() as u32` would truncate for messages over
        // 4 GiB; all call sites here pass short static/format strings.
        libc::write(
            STDERR,
            msg.as_ptr() as *const libc::c_void,
            msg.len() as u32,
        );
    }
    #[cfg(not(windows))]
    unsafe {
        libc::write(STDERR, msg.as_ptr() as *const libc::c_void, msg.len());
    }
}
×
322

323
impl RunLoop {
324
    /// Sets up a runloop and node, given a config.
    ///
    /// Creates the coordinator channel pair, the shutdown flag, the PoX
    /// watchdog comms, a "ready" miner status, and an event dispatcher with
    /// every configured observer registered.  No threads are spawned here.
    pub fn new(config: Config) -> Self {
        let channels = CoordinatorCommunication::instantiate();
        let should_keep_running = Arc::new(AtomicBool::new(true));
        let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone());
        let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(
            config.burnchain.burn_fee_cap,
        )));

        let mut event_dispatcher = EventDispatcher::new_with_custom_queue_size(
            config.get_working_dir(),
            config.node.effective_event_dispatcher_queue_size(),
        );
        for observer in config.events_observers.iter() {
            event_dispatcher.register_observer(observer);
        }

        Self {
            config,
            globals: None,
            coordinator_channels: Some(channels),
            callbacks: RunLoopCallbacks::new(),
            counters: Counters::default(),
            should_keep_running,
            event_dispatcher,
            pox_watchdog: None,
            is_miner: None,
            burnchain: None,
            pox_watchdog_comms,
            miner_status,
            monitoring_thread: None,
        }
    }
284✔
357

358
    pub fn get_globals(&self) -> Globals {
562✔
359
        self.globals
562✔
360
            .clone()
562✔
361
            .expect("FATAL: globals not instantiated")
562✔
362
    }
562✔
363

364
    /// Install the inter-thread `Globals` state.
    fn set_globals(&mut self, globals: Globals) {
        let _ = self.globals.replace(globals);
    }
281✔
367

368
    pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> {
282✔
369
        self.coordinator_channels.as_ref().map(|x| x.1.clone())
282✔
370
    }
282✔
371

372
    /// Handle to the blocks-processed counter (test instrumentation).
    pub fn get_blocks_processed_arc(&self) -> RunLoopCounter {
        let counters = &self.counters;
        counters.blocks_processed.clone()
    }
37✔
375

UNCOV
376
    /// Handle to the microblocks-processed counter (test instrumentation).
    pub fn get_microblocks_processed_arc(&self) -> RunLoopCounter {
        let counters = &self.counters;
        counters.microblocks_processed.clone()
    }
×
379

380
    /// Handle to the missed-tenures counter (test instrumentation).
    pub fn get_missed_tenures_arc(&self) -> RunLoopCounter {
        let counters = &self.counters;
        counters.missed_tenures.clone()
    }
1✔
383

UNCOV
384
    /// Handle to the missed-microblock-tenures counter (test instrumentation).
    pub fn get_missed_microblock_tenures_arc(&self) -> RunLoopCounter {
        let counters = &self.counters;
        counters.missed_microblock_tenures.clone()
    }
×
387

UNCOV
388
    /// Handle to the cancelled-commits counter (test instrumentation).
    pub fn get_cancelled_commits_arc(&self) -> RunLoopCounter {
        let counters = &self.counters;
        counters.cancelled_commits.clone()
    }
×
391

392
    pub fn get_counters(&self) -> Counters {
544✔
393
        self.counters.clone()
544✔
394
    }
544✔
395

396
    pub fn config(&self) -> &Config {
63,729✔
397
        &self.config
63,729✔
398
    }
63,729✔
399

400
    /// Clone the event dispatcher (observers registered in `new`).
    pub fn get_event_dispatcher(&self) -> EventDispatcher {
        Clone::clone(&self.event_dispatcher)
    }
803✔
403

404
    pub fn is_miner(&self) -> bool {
281✔
405
        self.is_miner.unwrap_or(false)
281✔
406
    }
281✔
407

408
    /// Clone the PoX sync watchdog communication handle.
    pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms {
        Clone::clone(&self.pox_watchdog_comms)
    }
1✔
411

412
    pub fn get_termination_switch(&self) -> Arc<AtomicBool> {
740✔
413
        self.should_keep_running.clone()
740✔
414
    }
740✔
415

416
    /// Clone the burnchain descriptor.
    ///
    /// # Panics
    /// Panics if called before `.start()` has set it.
    pub fn get_burnchain(&self) -> Burnchain {
        match self.burnchain.clone() {
            Some(burnchain) => burnchain,
            None => panic!("FATAL: tried to get runloop burnchain before calling .start()"),
        }
    }
1,124✔
421

422
    /// Mutably borrow the PoX sync watchdog.
    ///
    /// # Panics
    /// Panics if called before `instantiate_pox_watchdog` has run.
    pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog {
        match self.pox_watchdog.as_mut() {
            Some(watchdog) => watchdog,
            None => panic!("FATAL: tried to get PoX watchdog before calling .start()"),
        }
    }
365,959✔
427

428
    /// Handle to the shared miner status (also duplicated into globals).
    pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> {
        Arc::clone(&self.miner_status)
    }
281✔
431

432
    /// Set up termination handler.  Have a signal set the `should_keep_running` atomic bool to
    /// false.  Panics if called more than once, unless `allow_err` is true or
    /// this is a test build (in which case a duplicate install is only logged).
    pub fn setup_termination_handler(keep_running_writer: Arc<AtomicBool>, allow_err: bool) {
        let install = termination::set_handler(move |sig_id| match sig_id {
            SignalId::Bus => {
                // SIGBUS means memory is corrupt: don't attempt a graceful
                // shutdown, abort and dump core immediately.
                let msg = "Caught SIGBUS; crashing immediately and dumping core\n";
                async_safe_write_stderr(msg);
                unsafe {
                    libc::abort();
                }
            }
            _ => {
                // Any other handled signal requests a graceful shutdown:
                // clear the flag and let the runloop cycles drain.
                let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n");
                async_safe_write_stderr(&msg);
                keep_running_writer.store(false, Ordering::SeqCst);
            }
        });

        if let Err(e) = install {
            // integration tests can do this
            if cfg!(test) || allow_err {
                info!("Error setting up signal handler, may have already been set");
            } else {
                panic!("FATAL: error setting termination handler - {e}");
            }
        }
    }
526✔
459

460
    /// Seconds to wait before retrying UTXO check during startup
    const UTXO_RETRY_INTERVAL: u64 = 10;
    /// Number of times to retry UTXO check during startup
    /// (worst case: COUNT * INTERVAL = 60s before `check_is_miner` panics)
    const UTXO_RETRY_COUNT: u64 = 6;
464

465
    /// Determine if we're the miner.
    /// If there's a network error, then assume that we're not a miner.
    ///
    /// Mock-mining nodes short-circuit to `true`.  Otherwise, derives the
    /// miner's bitcoin address(es) from the configured seed and polls the
    /// burnchain for spendable UTXOs, retrying `UTXO_RETRY_COUNT` times with
    /// `UTXO_RETRY_INTERVAL` seconds between attempts (bitcoind may still be
    /// starting up).
    ///
    /// # Panics
    /// Panics if configured as a miner but no UTXOs are found after all retries.
    fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool {
        if self.config.node.miner {
            // If we are mock mining, then we don't need to check for UTXOs and
            // we can just return true.
            if self.config.get_node_config(false).mock_mining {
                return true;
            }
            let keychain = Keychain::default(self.config.node.seed.clone());
            let mut op_signer = keychain.generate_op_signer();
            // best-effort: a wallet-creation failure is only logged
            if let Err(e) = burnchain.create_wallet_if_dne() {
                warn!("Error when creating wallet: {e:?}");
            }
            // Addresses to probe for UTXOs, paired with the epoch whose
            // address format they use.
            let mut btc_addrs = vec![(
                StacksEpochId::Epoch2_05,
                // legacy
                BitcoinAddress::from_bytes_legacy(
                    self.config.burnchain.get_bitcoin_network().1,
                    LegacyBitcoinAddressType::PublicKeyHash,
                    &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0,
                )
                .expect("FATAL: failed to construct legacy bitcoin address"),
            )];
            if self.config.miner.segwit {
                btc_addrs.push((
                    StacksEpochId::Epoch21,
                    // segwit p2wpkh
                    BitcoinAddress::from_bytes_segwit_p2wpkh(
                        self.config.burnchain.get_bitcoin_network().1,
                        &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0,
                    )
                    .expect("FATAL: failed to construct segwit p2wpkh address"),
                ));
            }

            // retry UTXO check a few times, in case bitcoind is still starting up
            for _ in 0..Self::UTXO_RETRY_COUNT {
                for (epoch_id, btc_addr) in &btc_addrs {
                    info!("Miner node: checking UTXOs at address: {btc_addr}");
                    let utxos =
                        burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0);
                    if utxos.is_none() {
                        warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)");
                    } else {
                        info!("UTXOs found - will run as a Miner node");
                        return true;
                    }
                }
                thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL));
            }
            panic!("No UTXOs found, exiting");
        } else {
            info!("Will run as a Follower node");
            false
        }
    }
281✔
522

523
    /// Instantiate the burnchain client and databases.
    /// Fetches headers and instantiates the burnchain.
    /// Panics on failure.
    ///
    /// # Errors
    /// Returns `burnchain_error::ShutdownInitiated` if a shutdown was
    /// requested while the burnchain controller was starting up; all other
    /// failures panic.
    pub fn instantiate_burnchain_state(
        config: &Config,
        should_keep_running: Arc<AtomicBool>,
        burnchain_opt: Option<Burnchain>,
        coordinator_senders: CoordinatorChannels,
    ) -> Result<BitcoinRegtestController, burnchain_error> {
        // Initialize and start the burnchain.
        let mut burnchain_controller = BitcoinRegtestController::with_burnchain(
            config.clone(),
            Some(coordinator_senders),
            burnchain_opt,
            Some(should_keep_running.clone()),
        );

        let burnchain = burnchain_controller.get_burnchain();
        let epochs = burnchain_controller.get_stacks_epochs();

        // sanity check -- epoch data must be valid
        Config::assert_valid_epoch_settings(&burnchain, &epochs);

        // Upgrade chainstate databases if they exist already
        // NOTE: this has to be done before the subsequent call to
        // `burnchain_controller.connect_dbs()` below!
        match migrate_chainstate_dbs(
            &epochs,
            &burnchain,
            &config.get_burn_db_file_path(),
            &config.get_chainstate_path_str(),
            Some(config.node.get_marf_opts()),
        ) {
            Ok(_) => {}
            Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
                // migration cannot proceed: operator must re-sync or upgrade
                error!(
                    "FATAL: chainstate database(s) are not compatible with the current system epoch"
                );
                panic!();
            }
            Err(e) => {
                panic!("FATAL: unable to query filesystem or databases: {e:?}");
            }
        }

        info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while");

        // Decide how far to sync: resume from an existing DB's tip, or from
        // the configured first burnchain block for a fresh DB.
        let burnchain_config = burnchain_controller.get_burnchain();
        let target_burnchain_block_height = match burnchain_config
            .get_highest_burnchain_block()
            .expect("FATAL: failed to access burnchain database")
        {
            Some(burnchain_tip) => {
                // database exists already, and has blocks -- just sync to its tip.
                let target_height = burnchain_tip.block_height + 1;
                debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height);
                target_height
            }
            None => {
                // database does not exist yet
                let target_height = 1.max(burnchain_config.first_block_height + 1);
                debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}");
                target_height
            }
        };

        burnchain_controller
            .start(Some(target_burnchain_block_height))
            .map_err(|e| {
                // A closed coordinator during a requested shutdown is the one
                // startup failure that is not fatal.
                if matches!(e, Error::CoordinatorClosed)
                    && !should_keep_running.load(Ordering::SeqCst)
                {
                    info!("Shutdown initiated during burnchain initialization: {e}");
                    return burnchain_error::ShutdownInitiated;
                }
                error!("Burnchain controller stopped: {e}");
                panic!();
            })?;

        // if the chainstate DBs don't exist, this will instantiate them
        if let Err(e) = burnchain_controller.connect_dbs() {
            error!("Failed to connect to burnchain databases: {e}");
            panic!();
        };

        // TODO (hack) instantiate the sortdb in the burnchain
        let _ = burnchain_controller.sortdb_mut();
        Ok(burnchain_controller)
    }
526✔
612

613
    /// Boot up the stacks chainstate.
    /// Instantiate the chainstate and push out the boot receipts to observers.
    /// This is only public so we can test it.
    ///
    /// # Panics
    /// Panics (via `unwrap`) if the chainstate cannot be opened/booted.
    pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis balances
        let initial_balances = self
            .config
            .initial_balances
            .iter()
            .map(|e| (e.address.clone(), e.amount))
            .collect();

        // instantiate chainstate
        // The get_bulk_* callbacks lazily stream the (large) genesis data sets
        // instead of loading them up front.
        let mut boot_data = ChainStateBootData {
            initial_balances,
            post_flight_callback: None,
            first_burnchain_block_hash: burnchain_config.first_block_hash.clone(),
            first_burnchain_block_height: burnchain_config.first_block_height as u32,
            first_burnchain_block_timestamp: burnchain_config.first_block_timestamp,
            pox_constants: burnchain_config.pox_constants.clone(),
            get_bulk_initial_lockups: Some(Box::new(move || {
                get_account_lockups(use_test_genesis_data)
            })),
            get_bulk_initial_balances: Some(Box::new(move || {
                get_account_balances(use_test_genesis_data)
            })),
            get_bulk_initial_namespaces: Some(Box::new(move || {
                get_namespaces(use_test_genesis_data)
            })),
            get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))),
        };

        info!("About to call open_and_exec");
        let (chain_state_db, receipts) = StacksChainState::open_and_exec(
            self.config.is_mainnet(),
            self.config.burnchain.chain_id,
            &self.config.get_chainstate_path_str(),
            Some(&mut boot_data),
            Some(self.config.node.get_marf_opts()),
        )
        .unwrap();
        // push the boot receipts out to any registered event observers
        run_loop::announce_boot_receipts(
            &mut self.event_dispatcher,
            &chain_state_db,
            &burnchain_config.pox_constants,
            &receipts,
        );
        chain_state_db
    }
282✔
664

665
    /// Instantiate the Stacks chain state and start the chains coordinator thread.
    /// Returns the coordinator thread handle, and the receiving end of the coordinator's atlas
    /// attachment channel.
    ///
    /// # Panics
    /// Panics if the Atlas DB cannot be connected or the thread cannot be spawned.
    fn spawn_chains_coordinator(
        &mut self,
        burnchain_config: &Burnchain,
        coordinator_receivers: CoordinatorReceivers,
        miner_status: Arc<Mutex<MinerStatus>>,
    ) -> JoinHandle<()> {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis Atlas attachments
        let mut atlas_config = AtlasConfig::new(self.config.is_mainnet());
        let genesis_attachments = GenesisData::new(use_test_genesis_data)
            .read_name_zonefiles()
            .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec()))
            .collect();
        atlas_config.genesis_attachments = Some(genesis_attachments);

        let chain_state_db = self.boot_chainstate(burnchain_config);

        // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around
        // Everything moved into the thread closure is cloned up front.
        let moved_atlas_config = self.config.atlas.clone();
        let moved_config = self.config.clone();
        let moved_burnchain_config = burnchain_config.clone();
        let coordinator_dispatcher = self.event_dispatcher.clone();
        let atlas_db = AtlasDB::connect(
            moved_atlas_config.clone(),
            &self.config.get_atlas_db_file_path(),
            true,
        )
        .expect("Failed to connect Atlas DB during startup");
        let coordinator_indexer =
            make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone()));

        // The coordinator gets an enlarged stack (BLOCK_PROCESSOR_STACK_SIZE)
        // since block processing is deeply recursive.
        let coordinator_thread_handle = thread::Builder::new()
            .name(format!(
                "chains-coordinator-{}",
                &moved_config.node.rpc_bind
            ))
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
            .spawn(move || {
                debug!(
                    "chains-coordinator thread ID is {:?}",
                    thread::current().id()
                );
                let mut cost_estimator = moved_config.make_cost_estimator();
                let mut fee_estimator = moved_config.make_fee_estimator();

                let coord_config = ChainsCoordinatorConfig {
                    txindex: moved_config.node.txindex,
                };
                // Runs until the coordinator channels are closed.
                ChainsCoordinator::run(
                    coord_config,
                    chain_state_db,
                    moved_burnchain_config,
                    &coordinator_dispatcher,
                    coordinator_receivers,
                    moved_atlas_config,
                    cost_estimator.as_deref_mut(),
                    fee_estimator.as_deref_mut(),
                    miner_status,
                    coordinator_indexer,
                    atlas_db,
                );
            })
            .expect("FATAL: failed to start chains coordinator thread");

        coordinator_thread_handle
    }
281✔
735

736
    /// Instantiate the PoX watchdog
737
    fn instantiate_pox_watchdog(&mut self) {
281✔
738
        let pox_watchdog = PoxSyncWatchdog::new(&self.config, self.pox_watchdog_comms.clone())
281✔
739
            .expect("FATAL: failed to instantiate PoX sync watchdog");
281✔
740
        self.pox_watchdog = Some(pox_watchdog);
281✔
741
    }
281✔
742

743
    /// Start Prometheus logging
744
    fn start_prometheus(&mut self) {
281✔
745
        let Some(prometheus_bind) = self.config.node.prometheus_bind.clone() else {
281✔
746
            return;
272✔
747
        };
748
        let monitoring_thread = thread::Builder::new()
9✔
749
            .name("prometheus".to_string())
9✔
750
            .spawn(move || {
9✔
751
                debug!("prometheus thread ID is {:?}", thread::current().id());
9✔
752
                start_serving_monitoring_metrics(prometheus_bind)
9✔
753
            })
9✔
754
            .expect("FATAL: failed to start monitoring thread");
9✔
755

756
        self.monitoring_thread.replace(monitoring_thread);
9✔
757
    }
281✔
758

759
    /// Hand ownership of the Prometheus monitoring thread's join handle (if
    /// any) over to the caller, leaving `None` in its place.  Subsequent calls
    /// return `None` until `start_prometheus` runs again.
    pub fn take_monitoring_thread(&mut self) -> Option<JoinHandle<Result<(), MonitoringError>>> {
        self.monitoring_thread.take()
    }
242✔
762

763
    /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the
764
    /// highest sortition.
765
    /// Returns (height at rc start, sortition)
766
    fn get_reward_cycle_sortition_db_height(
281✔
767
        sortdb: &SortitionDB,
281✔
768
        burnchain_config: &Burnchain,
281✔
769
    ) -> (u64, BlockSnapshot) {
281✔
770
        let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())
281✔
771
            .expect("BUG: failed to load canonical stacks chain tip hash");
281✔
772

773
        let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch)
281✔
774
            .expect("BUG: failed to query sortition DB")
281✔
775
        {
776
            Some(sn) => sn,
281✔
777
            None => {
UNCOV
778
                debug!("No canonical stacks chain tip hash present");
×
UNCOV
779
                let sn = SortitionDB::get_first_block_snapshot(sortdb.conn())
×
UNCOV
780
                    .expect("BUG: failed to get first-ever block snapshot");
×
UNCOV
781
                sn
×
782
            }
783
        };
784

785
        (
281✔
786
            burnchain_config.reward_cycle_to_block_height(
281✔
787
                burnchain_config
281✔
788
                    .block_height_to_reward_cycle(sn.block_height)
281✔
789
                    .expect("BUG: snapshot preceeds first reward cycle"),
281✔
790
            ),
281✔
791
            sn,
281✔
792
        )
281✔
793
    }
281✔
794

795
    /// Starts the node runloop.
    ///
    /// This function will block by looping infinitely.
    /// It will start the burnchain (separate thread), set-up a channel in
    /// charge of coordinating the new blocks coming from the burnchain and
    /// the nodes, taking turns on tenures.
    ///
    /// Returns `Option<NeonGlobals>` so that data can be passed to `NakamotoNode`
    pub fn start(
        &mut self,
        burnchain_opt: Option<Burnchain>,
        mut mine_start: u64,
    ) -> Option<Neon2NakaData> {
        // The coordinator channels can only be taken once, so a second call to
        // start() on the same run loop panics here by design.
        let (coordinator_receivers, coordinator_senders) = self
            .coordinator_channels
            .take()
            .expect("Run loop already started, can only start once after initialization.");

        Self::setup_termination_handler(self.should_keep_running.clone(), false);

        let burnchain_result = Self::instantiate_burnchain_state(
            &self.config,
            self.should_keep_running.clone(),
            burnchain_opt,
            coordinator_senders.clone(),
        );

        // A shutdown during burnchain bootstrap is a clean exit, not an error.
        let mut burnchain = match burnchain_result {
            Ok(burnchain_controller) => burnchain_controller,
            Err(burnchain_error::ShutdownInitiated) => {
                info!("Exiting stacks-node");
                return None;
            }
            Err(e) => {
                error!("Error initializing burnchain: {e}");
                info!("Exiting stacks-node");
                return None;
            }
        };

        let burnchain_config = burnchain.get_burnchain();
        self.burnchain = Some(burnchain_config.clone());

        // can we mine?
        let is_miner = self.check_is_miner(&mut burnchain);
        self.is_miner = Some(is_miner);

        // relayer linkup
        let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER);

        // set up globals so other subsystems can instantiate off of the runloop state.
        let globals = Globals::new(
            coordinator_senders,
            self.get_miner_status(),
            relay_send,
            self.counters.clone(),
            self.pox_watchdog_comms.clone(),
            self.should_keep_running.clone(),
            mine_start,
            LeaderKeyRegistrationState::default(),
        );
        self.set_globals(globals.clone());

        // have headers; boot up the chains coordinator and instantiate the chain state
        let coordinator_thread_handle = self.spawn_chains_coordinator(
            &burnchain_config,
            coordinator_receivers,
            globals.get_miner_status(),
        );
        self.instantiate_pox_watchdog();
        self.start_prometheus();

        // We announce a new burn block so that the chains coordinator
        // can resume prior work and handle eventual unprocessed sortitions
        // stored during a previous session.
        globals.coord().announce_new_burn_block();

        // Make sure at least one sortition has happened, and make sure it's globally available
        let sortdb = burnchain.sortdb_mut();
        let (rc_aligned_height, sn) =
            RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config);

        let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height {
            // need at least one sortition to happen.
            burnchain
                .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1)
                .expect("Unable to get burnchain tip")
                .block_snapshot
        } else {
            sn
        };

        globals.set_last_sortition(burnchain_tip_snapshot);

        // Boot up the p2p network and relayer, and figure out how many sortitions we have so far
        // (it could be non-zero if the node is resuming from chainstate)
        let mut node = StacksNode::spawn(self, globals.clone(), relay_recv);

        // Wait for all pending sortitions to process
        let burnchain_db = burnchain_config
            .open_burnchain_db(true)
            .expect("FATAL: failed to open burnchain DB");
        let burnchain_db_tip = burnchain_db
            .get_canonical_chain_tip()
            .expect("FATAL: failed to query burnchain DB");
        let mut burnchain_tip = burnchain
            .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height)
            .expect("Unable to get burnchain tip");

        // Start the runloop
        debug!("Runloop: Begin run loop");
        self.counters.bump_blocks_processed();

        let mut sortition_db_height = rc_aligned_height;
        let mut burnchain_height = sortition_db_height;

        // prepare to fetch the first reward cycle!
        debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}");

        let mut last_tenure_sortition_height = 0;

        // Outer loop: one pass per PoX-watchdog-gated sync window.  Each pass
        // downloads burnchain blocks (inner loop), processes their sortitions,
        // and -- once caught up and not in initial block download (ibd) --
        // issues a tenure to the relayer/miner.
        loop {
            if !globals.keep_running() {
                // The p2p thread relies on the same atomic_bool, it will
                // discontinue its execution after completing its ongoing runloop epoch.
                info!("Terminating p2p process");
                info!("Terminating relayer");
                info!("Terminating chains-coordinator");

                globals.coord().stop_chains_coordinator();
                coordinator_thread_handle.join().unwrap();
                let peer_network = node.join();

                // Data that will be passed to Nakamoto run loop
                // Only gets transferred on clean shutdown of neon run loop
                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                info!("Exiting stacks-node");
                break Some(data_to_naka);
            }

            let remote_chain_height = burnchain.get_headers_height() - 1;

            // wait until it's okay to process the next reward cycle's sortitions.
            let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait(
                &burnchain_config,
                &burnchain_tip,
                remote_chain_height,
            ) {
                Ok(x) => x,
                Err(e) => {
                    // watchdog interrupted (e.g. shutdown) -- re-check
                    // keep_running() at the top of the loop.
                    debug!("Runloop: PoX sync wait routine aborted: {e:?}");
                    continue;
                }
            };

            // calculate burnchain sync percentage
            let percent: f64 = if remote_chain_height > 0 {
                burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64
            } else {
                0.0
            };

            // Download each burnchain block and process their sortitions.  This, in turn, will
            // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and
            // process them.  This loop runs for one reward cycle, so that the next pass of the
            // runloop will cause the PoX sync watchdog to wait until it believes that the node has
            // obtained all the Stacks blocks it can.
            debug!(
                "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})",
                burnchain_config
                    .block_height_to_reward_cycle(target_burnchain_block_height)
                    .expect("FATAL: target burnchain block height does not have a reward cycle");
                "total_burn_sync_percent" => %percent,
                "local_burn_height" => burnchain_tip.block_snapshot.block_height,
                "remote_tip_height" => remote_chain_height
            );

            // Inner loop: sync burnchain blocks until we reach the target (or
            // remote) height, processing each new sortition as it arrives.
            loop {
                if !globals.keep_running() {
                    break;
                }

                let (next_burnchain_tip, tip_burnchain_height) =
                    match burnchain.sync(Some(target_burnchain_block_height)) {
                        Ok(x) => x,
                        Err(e) => {
                            warn!("Runloop: Burnchain controller stopped: {e}");
                            continue;
                        }
                    };

                // *now* we know the burnchain height
                burnchain_tip = next_burnchain_tip;
                burnchain_height = tip_burnchain_height;

                let sortition_tip = &burnchain_tip.block_snapshot.sortition_id;
                let next_sortition_height = burnchain_tip.block_snapshot.block_height;

                if next_sortition_height != last_tenure_sortition_height {
                    info!(
                        "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}"
                    );
                }

                if next_sortition_height > sortition_db_height {
                    debug!(
                        "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}"
                    );

                    debug!("Runloop: block mining until we process all sortitions");
                    signal_mining_blocked(globals.get_miner_status());

                    // first, let's process all blocks in (sortition_db_height, next_sortition_height]
                    for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) {
                        // stop mining so we can advance the sortition DB and so our
                        // ProcessTenure() directive (sent by relayer_sortition_notify() below)
                        // will be unblocked.

                        let block = {
                            let ic = burnchain.sortdb_ref().index_conn();
                            SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip)
                                .unwrap()
                                .expect(
                                    "Failed to find block in fork processed by burnchain indexer",
                                )
                        };

                        let sortition_id = &block.sortition_id;

                        // Have the node process the new block, that can include, or not, a sortition.
                        node.process_burnchain_state(
                            self.config(),
                            burnchain.sortdb_mut(),
                            sortition_id,
                            ibd,
                        );

                        // Now, tell the relayer to check if it won a sortition during this block,
                        // and, if so, to process and advertize the block.  This is basically a
                        // no-op during boot-up.
                        //
                        // _this will block if the relayer's buffer is full_
                        if !node.relayer_sortition_notify() {
                            // First check if we were supposed to cleanly exit
                            if !globals.keep_running() {
                                // The p2p thread relies on the same atomic_bool, it will
                                // discontinue its execution after completing its ongoing runloop epoch.
                                info!("Terminating p2p process");
                                info!("Terminating relayer");
                                info!("Terminating chains-coordinator");

                                globals.coord().stop_chains_coordinator();
                                coordinator_thread_handle.join().unwrap();
                                let peer_network = node.join();

                                // Data that will be passed to Nakamoto run loop
                                // Only gets transferred on clean shutdown of neon run loop
                                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                                info!("Exiting stacks-node");
                                return Some(data_to_naka);
                            }
                            // relayer hung up, exit.
                            error!("Runloop: Block relayer and miner hung up, exiting.");
                            return None;
                        }
                    }

                    debug!("Runloop: enable miner after processing sortitions");
                    signal_mining_ready(globals.get_miner_status());

                    debug!(
                        "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})"
                    );

                    sortition_db_height = next_sortition_height;
                } else if ibd {
                    // drive block processing after we reach the burnchain tip.
                    // we may have downloaded all the blocks already,
                    // so we can't rely on the relayer alone to
                    // drive it.
                    globals.coord().announce_new_stacks_block();
                }

                if burnchain_height >= target_burnchain_block_height
                    || burnchain_height >= remote_chain_height
                {
                    break;
                }
            }

            // Fully synced and not in initial block download: consider mining.
            if sortition_db_height >= burnchain_height && !ibd {
                let canonical_stacks_tip_height =
                    SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn())
                        .map(|snapshot| snapshot.canonical_stacks_tip_height)
                        .unwrap_or(0);
                if canonical_stacks_tip_height < mine_start {
                    info!(
                        "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip"
                    );
                } else {
                    // once we've synced to the chain tip once, don't apply this check again.
                    //  this prevents a possible corner case in the event of a PoX fork.
                    mine_start = 0;
                    globals.set_start_mining_height_if_zero(sortition_db_height);

                    // at tip, and not downloading. proceed to mine.
                    if last_tenure_sortition_height != sortition_db_height {
                        if is_miner {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks"
                            );
                        } else {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}."
                            );
                        }
                        last_tenure_sortition_height = sortition_db_height;
                    }

                    if !node.relayer_issue_tenure(ibd) {
                        // First check if we were supposed to cleanly exit
                        if !globals.keep_running() {
                            // The p2p thread relies on the same atomic_bool, it will
                            // discontinue its execution after completing its ongoing runloop epoch.
                            info!("Terminating p2p process");
                            info!("Terminating relayer");
                            info!("Terminating chains-coordinator");

                            globals.coord().stop_chains_coordinator();
                            coordinator_thread_handle.join().unwrap();
                            let peer_network = node.join();

                            // Data that will be passed to Nakamoto run loop
                            // Only gets transferred on clean shutdown of neon run loop
                            let data_to_naka = Neon2NakaData::new(globals, peer_network);

                            info!("Exiting stacks-node");
                            return Some(data_to_naka);
                        }
                        // relayer hung up, exit.
                        error!("Runloop: Block relayer and miner hung up, exiting.");
                        break None;
                    }
                }
            }
        }
    }
283✔
1144
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc