• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 25801484257-1

13 May 2026 01:15PM UTC coverage: 85.648% (-0.06%) from 85.712%
25801484257-1

Pull #7183

github

2d7e6d
web-flow
Merge 420cb597a into 31276d071
Pull Request #7183: Fix problematic transaction handling

110 of 130 new or added lines in 7 files covered. (84.62%)

5464 existing lines in 98 files now uncovered.

188263 of 219809 relevant lines covered (85.65%)

18940648.33 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.52
/stacks-node/src/run_loop/neon.rs
1
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
2
// Copyright (C) 2020-2026 Stacks Open Internet Foundation
3
//
4
// This program is free software: you can redistribute it and/or modify
5
// it under the terms of the GNU General Public License as published by
6
// the Free Software Foundation, either version 3 of the License, or
7
// (at your option) any later version.
8
//
9
// This program is distributed in the hope that it will be useful,
10
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
// GNU General Public License for more details.
13
//
14
// You should have received a copy of the GNU General Public License
15
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
16

17
#[cfg(test)]
18
use std::sync::atomic::AtomicU64;
19
use std::sync::atomic::{AtomicBool, Ordering};
20
use std::sync::mpsc::sync_channel;
21
use std::sync::{Arc, Mutex};
22
use std::thread;
23
use std::thread::JoinHandle;
24

25
use libc;
26
use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType};
27
use stacks::burnchains::{Burnchain, Error as burnchain_error};
28
use stacks::chainstate::burn::db::sortdb::SortitionDB;
29
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
30
use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers};
31
use stacks::chainstate::coordinator::{
32
    migrate_chainstate_dbs, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication,
33
    Error as coord_error,
34
};
35
use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
36
use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
37
use stacks::core::StacksEpochId;
38
use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment};
39
#[cfg(test)]
40
use stacks::util::tests::TestFlag;
41
use stacks::util_lib::db::Error as db_error;
42
use stacks_common::deps_common::ctrlc as termination;
43
use stacks_common::deps_common::ctrlc::SignalId;
44
use stacks_common::types::PublicKey;
45
use stacks_common::util::hash::Hash160;
46
use stx_genesis::GenesisData;
47

48
use super::RunLoopCallbacks;
49
use crate::burnchains::{make_bitcoin_indexer, Error};
50
use crate::globals::NeonGlobals as Globals;
51
use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError};
52
use crate::neon_node::{
53
    LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER,
54
};
55
use crate::node::{
56
    get_account_balances, get_account_lockups, get_names, get_namespaces,
57
    use_test_genesis_chainstate,
58
};
59
use crate::run_loop::boot_nakamoto::Neon2NakaData;
60
use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms};
61
use crate::{
62
    run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain,
63
};
64

65
pub const STDERR: i32 = 2;
66

67
/// Test-only observable slot: a clonable, mutex-guarded cell that the run loop
/// writes into (see `Counters::update`) and tests read back out.
#[cfg(test)]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub Arc<Mutex<T>>);

/// Non-test builds: a zero-sized placeholder so containing structs keep the
/// same field layout without paying for an `Arc<Mutex<_>>`.
#[cfg(not(test))]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub std::marker::PhantomData<T>);
74

75
/// Test-only observable counter: a shared atomic that the run loop bumps
/// (see `Counters::inc`/`Counters::set`) and tests poll.
#[cfg(test)]
#[derive(Clone)]
pub struct RunLoopCounter(pub Arc<AtomicU64>);

/// Non-test builds: a zero-sized stand-in; all counter updates are no-ops.
#[cfg(not(test))]
#[derive(Clone)]
pub struct RunLoopCounter();
82

83
impl Default for RunLoopCounter {
    /// Test builds: a fresh shared counter initialized to 0.
    #[cfg(test)]
    fn default() -> Self {
        RunLoopCounter(Arc::new(AtomicU64::new(0)))
    }
    /// Non-test builds: the zero-sized placeholder.
    #[cfg(not(test))]
    fn default() -> Self {
        Self()
    }
}
93

94
impl<T: Clone> RunLoopField<Option<T>> {
    /// Test-only accessor: clone the value most recently stored via
    /// `Counters::update`.
    ///
    /// Panics if the mutex is poisoned or if no value has been stored yet
    /// (the inner `Option` is still `None`).
    #[cfg(test)]
    pub fn get(&self) -> T {
        self.0.lock().unwrap().clone().unwrap()
    }
}
100

101
impl RunLoopCounter {
    /// Test-only: read the current counter value with `SeqCst` ordering.
    #[cfg(test)]
    pub fn get(&self) -> u64 {
        self.0.load(Ordering::SeqCst)
    }

    /// Test-only: read the counter with a caller-supplied ordering,
    /// mirroring `AtomicU64::load`.
    #[cfg(test)]
    pub fn load(&self, ordering: Ordering) -> u64 {
        self.0.load(ordering)
    }
}
112

113
// Test-only convenience: lets tests treat a `RunLoopCounter` directly as its
// inner `Arc<AtomicU64>` (e.g. to call `fetch_add`/`store` on it).
#[cfg(test)]
impl std::ops::Deref for RunLoopCounter {
    type Target = Arc<AtomicU64>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
121

122
/// Counters the run loop exposes so tests (and callers holding a clone) can
/// observe progress.  In non-test builds the wrapped counters are zero-sized
/// and every update is a no-op (see `Counters::inc`/`set`/`update`).
#[derive(Clone, Default)]
pub struct Counters {
    /// Bumped by `bump_blocks_processed`.
    pub blocks_processed: RunLoopCounter,
    /// Bumped by `bump_microblocks_processed`; also settable via `set_microblocks_processed`.
    pub microblocks_processed: RunLoopCounter,
    /// Bumped by `bump_missed_tenures`.
    pub missed_tenures: RunLoopCounter,
    /// Bumped by `bump_missed_microblock_tenures`.
    pub missed_microblock_tenures: RunLoopCounter,
    /// Bumped by `bump_cancelled_commits`.
    pub cancelled_commits: RunLoopCounter,

    /// Bumped by `bump_sortitions_processed`.
    pub sortitions_processed: RunLoopCounter,

    /// Bumped by `bump_naka_submitted_vrfs`.
    pub naka_submitted_vrfs: RunLoopCounter,
    /// the number of submitted commits
    pub neon_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub neon_submitted_commit_last_burn_height: RunLoopCounter,
    /// Bumped by `bump_naka_submitted_commits`.
    pub naka_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub naka_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_mined_blocks: RunLoopCounter,
    pub naka_rejected_blocks: RunLoopCounter,
    pub naka_proposed_blocks: RunLoopCounter,
    pub naka_mined_tenures: RunLoopCounter,
    pub naka_signer_pushed_blocks: RunLoopCounter,
    pub naka_miner_directives: RunLoopCounter,
    /// Stacks tip height recorded by the last `bump_naka_submitted_commits`.
    pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
    /// Sats amount recorded by the last `bump_naka_submitted_commits`.
    pub naka_submitted_commit_last_commit_amount: RunLoopCounter,
    /// Parent tenure id recorded by the last `bump_naka_submitted_commits`.
    pub naka_submitted_commit_last_parent_tenure_id: RunLoopField<Option<ConsensusHash>>,

    /// Set via `set_miner_current_rejections`.
    pub naka_miner_current_rejections: RunLoopCounter,
    /// Set via `set_miner_current_rejections_timeout_secs`.
    pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,

    // Test-only flag; presumably lets tests suppress block-commit
    // submission -- TODO confirm against miner code.
    #[cfg(test)]
    pub skip_commit_op: TestFlag<bool>,
}
156

157
impl Counters {
158
    pub fn new() -> Self {
1✔
159
        Self::default()
1✔
160
    }
1✔
161

162
    #[cfg(test)]
163
    fn inc(ctr: &RunLoopCounter) {
33,726✔
164
        ctr.0.fetch_add(1, Ordering::SeqCst);
33,726✔
165
    }
33,726✔
166

167
    #[cfg(not(test))]
168
    fn inc(_ctr: &RunLoopCounter) {}
169

170
    #[cfg(test)]
171
    fn set(ctr: &RunLoopCounter, value: u64) {
12,275✔
172
        ctr.0.store(value, Ordering::SeqCst);
12,275✔
173
    }
12,275✔
174

175
    #[cfg(not(test))]
176
    fn set(_ctr: &RunLoopCounter, _value: u64) {}
177

178
    #[cfg(test)]
179
    fn update<T: Clone>(ctr: &RunLoopField<Option<T>>, value: &T) {
1,721✔
180
        let mut mutex = ctr.0.lock().expect("FATAL: test counter mutext poisoned");
1,721✔
181
        let _ = mutex.replace(value.clone());
1,721✔
182
    }
1,721✔
183

184
    #[cfg(not(test))]
185
    fn update<T: Clone>(_ctr: &RunLoopField<Option<T>>, _value: &T) {}
186

187
    pub fn bump_blocks_processed(&self) {
12,034✔
188
        Counters::inc(&self.blocks_processed);
12,034✔
189
    }
12,034✔
190

191
    pub fn bump_sortitions_processed(&self) {
4,423✔
192
        Counters::inc(&self.sortitions_processed);
4,423✔
193
    }
4,423✔
194

UNCOV
195
    pub fn bump_microblocks_processed(&self) {
×
UNCOV
196
        Counters::inc(&self.microblocks_processed);
×
UNCOV
197
    }
×
198

199
    pub fn bump_missed_tenures(&self) {
1,112✔
200
        Counters::inc(&self.missed_tenures);
1,112✔
201
    }
1,112✔
202

UNCOV
203
    pub fn bump_missed_microblock_tenures(&self) {
×
UNCOV
204
        Counters::inc(&self.missed_microblock_tenures);
×
UNCOV
205
    }
×
206

UNCOV
207
    pub fn bump_cancelled_commits(&self) {
×
UNCOV
208
        Counters::inc(&self.cancelled_commits);
×
UNCOV
209
    }
×
210

211
    pub fn bump_neon_submitted_commits(&self, committed_burn_height: u64) {
6,774✔
212
        Counters::inc(&self.neon_submitted_commits);
6,774✔
213
        Counters::set(
6,774✔
214
            &self.neon_submitted_commit_last_burn_height,
6,774✔
215
            committed_burn_height,
6,774✔
216
        );
217
    }
6,774✔
218

219
    pub fn bump_naka_submitted_vrfs(&self) {
38✔
220
        Counters::inc(&self.naka_submitted_vrfs);
38✔
221
    }
38✔
222

223
    pub fn bump_naka_submitted_commits(
1,721✔
224
        &self,
1,721✔
225
        committed_burn_height: u64,
1,721✔
226
        committed_stacks_height: u64,
1,721✔
227
        committed_sats_amount: u64,
1,721✔
228
        committed_parent_tenure_id: &ConsensusHash,
1,721✔
229
    ) {
1,721✔
230
        Counters::inc(&self.naka_submitted_commits);
1,721✔
231
        Counters::set(
1,721✔
232
            &self.naka_submitted_commit_last_burn_height,
1,721✔
233
            committed_burn_height,
1,721✔
234
        );
235
        Counters::set(
1,721✔
236
            &self.naka_submitted_commit_last_stacks_tip,
1,721✔
237
            committed_stacks_height,
1,721✔
238
        );
239
        Counters::set(
1,721✔
240
            &self.naka_submitted_commit_last_commit_amount,
1,721✔
241
            committed_sats_amount,
1,721✔
242
        );
243
        Counters::update(
1,721✔
244
            &self.naka_submitted_commit_last_parent_tenure_id,
1,721✔
245
            committed_parent_tenure_id,
1,721✔
246
        );
247
    }
1,721✔
248

249
    pub fn bump_naka_mined_blocks(&self) {
2,289✔
250
        Counters::inc(&self.naka_mined_blocks);
2,289✔
251
    }
2,289✔
252

253
    pub fn bump_naka_proposed_blocks(&self) {
2,385✔
254
        Counters::inc(&self.naka_proposed_blocks);
2,385✔
255
    }
2,385✔
256

257
    pub fn bump_naka_rejected_blocks(&self) {
96✔
258
        Counters::inc(&self.naka_rejected_blocks);
96✔
259
    }
96✔
260

261
    pub fn bump_naka_signer_pushed_blocks(&self) {
15✔
262
        Counters::inc(&self.naka_signer_pushed_blocks);
15✔
263
    }
15✔
264

265
    pub fn bump_naka_mined_tenures(&self) {
1,309✔
266
        Counters::inc(&self.naka_mined_tenures);
1,309✔
267
    }
1,309✔
268

269
    pub fn bump_naka_miner_directives(&self) {
1,530✔
270
        Counters::inc(&self.naka_miner_directives);
1,530✔
271
    }
1,530✔
272

273
    pub fn set_microblocks_processed(&self, value: u64) {
68✔
274
        Counters::set(&self.microblocks_processed, value)
68✔
275
    }
68✔
276

277
    pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) {
135✔
278
        Counters::set(&self.naka_miner_current_rejections_timeout_secs, value)
135✔
279
    }
135✔
280

281
    pub fn set_miner_current_rejections(&self, value: u32) {
135✔
282
        Counters::set(&self.naka_miner_current_rejections, u64::from(value))
135✔
283
    }
135✔
284
}
285

286
/// Coordinating a node running in neon mode.
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    // Set by set_globals() once the node is started.
    globals: Option<Globals>,
    // Observable progress counters; clones handed out via get_counters().
    counters: Counters,
    // Taken when the coordinator is spawned; receivers go to the coordinator
    // thread, senders are cloned out via get_coordinator_channel().
    coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>,
    // Cleared by the termination signal handler to request shutdown.
    should_keep_running: Arc<AtomicBool>,
    event_dispatcher: EventDispatcher,
    pox_watchdog: Option<PoxSyncWatchdog>, // can't be instantiated until .start() is called
    is_miner: Option<bool>,                // not known until .start() is called
    burnchain: Option<Burnchain>,          // not known until .start() is called
    pox_watchdog_comms: PoxSyncWatchdogComms,
    /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is
    /// instantiated (namely, so the test framework can access it).
    miner_status: Arc<Mutex<MinerStatus>>,
    // Handle to the Prometheus metrics thread, if one was started.
    monitoring_thread: Option<JoinHandle<Result<(), MonitoringError>>>,
}
304

305
/// Write to stderr in an async-safe manner.
/// See signal-safety(7)
///
/// Called from the termination signal handler, so it uses a raw
/// `libc::write(2)` rather than `eprintln!` (which may allocate and lock).
/// NOTE(review): the return value of `write` is ignored, so short writes and
/// errors are silently dropped — presumably acceptable for best-effort
/// crash/shutdown logging.
fn async_safe_write_stderr(msg: &str) {
    #[cfg(windows)]
    unsafe {
        // write(2) inexplicably has a different ABI only on Windows.
        // SAFETY: `msg` points to a live buffer of exactly `msg.len()` bytes.
        libc::write(
            STDERR,
            msg.as_ptr() as *const libc::c_void,
            msg.len() as u32,
        );
    }
    #[cfg(not(windows))]
    // SAFETY: `msg` points to a live buffer of exactly `msg.len()` bytes.
    unsafe {
        libc::write(STDERR, msg.as_ptr() as *const libc::c_void, msg.len());
    }
}
×
322

323
impl RunLoop {
324
    /// Sets up a runloop and node, given a config.
    ///
    /// Instantiates the coordinator channels, the shutdown flag, the PoX
    /// watchdog comms, the miner status, and an event dispatcher with every
    /// configured observer registered.  Fields marked `None` here
    /// (`globals`, `pox_watchdog`, `is_miner`, `burnchain`,
    /// `monitoring_thread`) are filled in later, during `.start()`.
    pub fn new(config: Config) -> Self {
        let channels = CoordinatorCommunication::instantiate();
        let should_keep_running = Arc::new(AtomicBool::new(true));
        let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone());
        let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(
            config.burnchain.burn_fee_cap,
        )));

        // Queue size comes from the node config; observers are registered
        // up-front so boot receipts can be announced to them.
        let mut event_dispatcher = EventDispatcher::new_with_custom_queue_size(
            config.get_working_dir(),
            config.node.effective_event_dispatcher_queue_size(),
        );
        for observer in config.events_observers.iter() {
            event_dispatcher.register_observer(observer);
        }

        Self {
            config,
            globals: None,
            coordinator_channels: Some(channels),
            callbacks: RunLoopCallbacks::new(),
            counters: Counters::default(),
            should_keep_running,
            event_dispatcher,
            pox_watchdog: None,
            is_miner: None,
            burnchain: None,
            pox_watchdog_comms,
            miner_status,
            monitoring_thread: None,
        }
    }
286✔
357

358
    /// Return a clone of the run loop's `Globals`.
    /// Panics if they have not been instantiated yet (see `set_globals`).
    pub fn get_globals(&self) -> Globals {
        match &self.globals {
            Some(globals) => globals.clone(),
            None => panic!("FATAL: globals not instantiated"),
        }
    }
566✔
363

364
    /// Store the node's `Globals`; after this, `get_globals()` succeeds.
    fn set_globals(&mut self, globals: Globals) {
        self.globals = Some(globals);
    }
283✔
367

368
    /// Return a clone of the coordinator's sender-side channels, if this run
    /// loop still holds the channel pair.
    pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> {
        match self.coordinator_channels.as_ref() {
            Some((_, senders)) => Some(senders.clone()),
            None => None,
        }
    }
284✔
371

372
    /// Clone of the blocks-processed counter (shared in test builds).
    pub fn get_blocks_processed_arc(&self) -> RunLoopCounter {
        self.counters.blocks_processed.clone()
    }
37✔
375

UNCOV
376
    pub fn get_microblocks_processed_arc(&self) -> RunLoopCounter {
×
UNCOV
377
        self.counters.microblocks_processed.clone()
×
UNCOV
378
    }
×
379

380
    /// Clone of the missed-tenures counter (shared in test builds).
    pub fn get_missed_tenures_arc(&self) -> RunLoopCounter {
        self.counters.missed_tenures.clone()
    }
1✔
383

UNCOV
384
    pub fn get_missed_microblock_tenures_arc(&self) -> RunLoopCounter {
×
UNCOV
385
        self.counters.missed_microblock_tenures.clone()
×
UNCOV
386
    }
×
387

UNCOV
388
    pub fn get_cancelled_commits_arc(&self) -> RunLoopCounter {
×
UNCOV
389
        self.counters.cancelled_commits.clone()
×
UNCOV
390
    }
×
391

392
    /// Clone the whole counter set; in test builds the clones share state
    /// with this run loop's counters.
    pub fn get_counters(&self) -> Counters {
        self.counters.clone()
    }
548✔
395

396
    /// Borrow the node configuration.
    pub fn config(&self) -> &Config {
        &self.config
    }
64,187✔
399

400
    /// Clone the event dispatcher (with all observers registered in `new()`).
    pub fn get_event_dispatcher(&self) -> EventDispatcher {
        self.event_dispatcher.clone()
    }
809✔
403

404
    pub fn is_miner(&self) -> bool {
283✔
405
        self.is_miner.unwrap_or(false)
283✔
406
    }
283✔
407

408
    /// Clone the PoX sync watchdog comms handle.
    pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms {
        self.pox_watchdog_comms.clone()
    }
1✔
411

412
    /// Clone the shutdown flag; storing `false` into it requests that the
    /// run loop terminate (the signal handler does exactly this).
    pub fn get_termination_switch(&self) -> Arc<AtomicBool> {
        self.should_keep_running.clone()
    }
746✔
415

416
    /// Return a clone of the burnchain configuration.
    /// Panics if called before `.start()` has populated it.
    pub fn get_burnchain(&self) -> Burnchain {
        match &self.burnchain {
            Some(burnchain) => burnchain.clone(),
            None => panic!("FATAL: tried to get runloop burnchain before calling .start()"),
        }
    }
1,132✔
421

422
    /// Mutable access to the PoX sync watchdog.
    /// Panics if called before `.start()` has instantiated it
    /// (via `instantiate_pox_watchdog`).
    pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog {
        self.pox_watchdog
            .as_mut()
            .expect("FATAL: tried to get PoX watchdog before calling .start()")
    }
377,157✔
427

428
    /// Clone the shared miner-status handle (available before `globals` is
    /// instantiated -- see the field comment on `RunLoop::miner_status`).
    pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> {
        self.miner_status.clone()
    }
283✔
431

432
    /// Set up termination handler.  Have a signal set the `should_keep_running` atomic bool to
    /// false.  Panics if called more than once, unless `allow_err` is set or this is a test
    /// build (installation errors are then only logged).
    pub fn setup_termination_handler(keep_running_writer: Arc<AtomicBool>, allow_err: bool) {
        let install = termination::set_handler(move |sig_id| match sig_id {
            SignalId::Bus => {
                // SIGBUS: unrecoverable -- write a message and dump core.
                let msg = "Caught SIGBUS; crashing immediately and dumping core\n";
                async_safe_write_stderr(msg);
                unsafe {
                    libc::abort();
                }
            }
            _ => {
                // Any other handled signal requests a graceful shutdown.
                // NOTE(review): `format!` allocates, which is not strictly
                // async-signal-safe even though the write itself is — confirm
                // the handler context makes this acceptable.
                let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n");
                async_safe_write_stderr(&msg);
                keep_running_writer.store(false, Ordering::SeqCst);
            }
        });

        if let Err(e) = install {
            // integration tests can do this
            if cfg!(test) || allow_err {
                info!("Error setting up signal handler, may have already been set");
            } else {
                panic!("FATAL: error setting termination handler - {e}");
            }
        }
    }
530✔
459

460
    /// Seconds to wait before retrying UTXO check during startup
    /// (used by `check_is_miner`).
    const UTXO_RETRY_INTERVAL: u64 = 10;
    /// Number of times to retry UTXO check during startup before panicking
    /// (used by `check_is_miner`).
    const UTXO_RETRY_COUNT: u64 = 6;
464

465
    /// Determine if we're the miner.
    /// If there's a network error, then assume that we're not a miner.
    ///
    /// Non-miner config: returns `false` immediately.  Mock-mining miner:
    /// returns `true` without touching bitcoind.  Otherwise, derives the
    /// miner's bitcoin address(es) from the configured seed and polls
    /// bitcoind for UTXOs, retrying `UTXO_RETRY_COUNT` times with
    /// `UTXO_RETRY_INTERVAL`-second sleeps; panics if none are ever found.
    fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool {
        if self.config.node.miner {
            // If we are mock mining, then we don't need to check for UTXOs and
            // we can just return true.
            if self.config.get_node_config(false).mock_mining {
                return true;
            }
            let keychain = Keychain::default(self.config.node.seed.clone());
            let mut op_signer = keychain.generate_op_signer();
            // Best-effort: a wallet-creation failure is logged, not fatal.
            if let Err(e) = burnchain.create_wallet_if_dne() {
                warn!("Error when creating wallet: {e:?}");
            }
            // Each candidate address is paired with the epoch whose rules
            // produced it; the UTXO query below is made per (epoch, address).
            let mut btc_addrs = vec![(
                StacksEpochId::Epoch2_05,
                // legacy
                BitcoinAddress::from_bytes_legacy(
                    self.config.burnchain.get_bitcoin_network().1,
                    LegacyBitcoinAddressType::PublicKeyHash,
                    &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0,
                )
                .expect("FATAL: failed to construct legacy bitcoin address"),
            )];
            if self.config.miner.segwit {
                btc_addrs.push((
                    StacksEpochId::Epoch21,
                    // segwit p2wpkh
                    BitcoinAddress::from_bytes_segwit_p2wpkh(
                        self.config.burnchain.get_bitcoin_network().1,
                        &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0,
                    )
                    .expect("FATAL: failed to construct segwit p2wpkh address"),
                ));
            }

            // retry UTXO check a few times, in case bitcoind is still starting up
            for _ in 0..Self::UTXO_RETRY_COUNT {
                for (epoch_id, btc_addr) in &btc_addrs {
                    info!("Miner node: checking UTXOs at address: {btc_addr}");
                    let utxos =
                        burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0);
                    if utxos.is_none() {
                        warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)");
                    } else {
                        // First address with UTXOs wins.
                        info!("UTXOs found - will run as a Miner node");
                        return true;
                    }
                }
                thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL));
            }
            panic!("No UTXOs found, exiting");
        } else {
            info!("Will run as a Follower node");
            false
        }
    }
283✔
522

523
    /// Instantiate the burnchain client and databases.
    /// Fetches headers and instantiates the burnchain.
    /// Panics on failure.
    ///
    /// Returns `Err(burnchain_error::ShutdownInitiated)` only when the
    /// coordinator was closed because a shutdown was requested while headers
    /// were still syncing; every other failure panics.
    pub fn instantiate_burnchain_state(
        config: &Config,
        should_keep_running: Arc<AtomicBool>,
        burnchain_opt: Option<Burnchain>,
        coordinator_senders: CoordinatorChannels,
    ) -> Result<BitcoinRegtestController, burnchain_error> {
        // Initialize and start the burnchain.
        let mut burnchain_controller = BitcoinRegtestController::with_burnchain(
            config.clone(),
            Some(coordinator_senders),
            burnchain_opt,
            Some(should_keep_running.clone()),
        );

        let burnchain = burnchain_controller.get_burnchain();
        let epochs = burnchain_controller.get_stacks_epochs();

        // sanity check -- epoch data must be valid
        Config::assert_valid_epoch_settings(&burnchain, &epochs);

        // Upgrade chainstate databases if they exist already
        // NOTE: this has to be done before the subsequent call to
        // `burnchain_controller.connect_dbs()` below!
        match migrate_chainstate_dbs(
            &epochs,
            &burnchain,
            &config.get_burn_db_file_path(),
            &config.get_chainstate_path_str(),
            Some(config.node.get_marf_opts()),
        ) {
            Ok(_) => {}
            Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
                error!(
                    "FATAL: chainstate database(s) are not compatible with the current system epoch"
                );
                panic!();
            }
            Err(e) => {
                panic!("FATAL: unable to query filesystem or databases: {e:?}");
            }
        }

        info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while");

        // Pick the sync target: one past the highest block we already have,
        // or one past the first burnchain block if the DB is empty.
        let burnchain_config = burnchain_controller.get_burnchain();
        let target_burnchain_block_height = match burnchain_config
            .get_highest_burnchain_block()
            .expect("FATAL: failed to access burnchain database")
        {
            Some(burnchain_tip) => {
                // database exists already, and has blocks -- just sync to its tip.
                let target_height = burnchain_tip.block_height + 1;
                debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height);
                target_height
            }
            None => {
                // database does not exist yet
                let target_height = 1.max(burnchain_config.first_block_height + 1);
                debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}");
                target_height
            }
        };

        burnchain_controller
            .start(Some(target_burnchain_block_height))
            .map_err(|e| {
                // A closed coordinator during a requested shutdown is the one
                // error that propagates instead of panicking.
                if matches!(e, Error::CoordinatorClosed)
                    && !should_keep_running.load(Ordering::SeqCst)
                {
                    info!("Shutdown initiated during burnchain initialization: {e}");
                    return burnchain_error::ShutdownInitiated;
                }
                error!("Burnchain controller stopped: {e}");
                panic!();
            })?;

        // if the chainstate DBs don't exist, this will instantiate them
        if let Err(e) = burnchain_controller.connect_dbs() {
            error!("Failed to connect to burnchain databases: {e}");
            panic!();
        };

        // TODO (hack) instantiate the sortdb in the burnchain
        let _ = burnchain_controller.sortdb_mut();
        Ok(burnchain_controller)
    }
530✔
612

613
    /// Boot up the stacks chainstate.
    /// Instantiate the chainstate and push out the boot receipts to observers
    /// This is only public so we can test it.
    ///
    /// Panics (via `unwrap`) if the chainstate cannot be opened or the boot
    /// code fails to execute.
    pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis balances
        let initial_balances = self
            .config
            .initial_balances
            .iter()
            .map(|e| (e.address.clone(), e.amount))
            .collect();

        // instantiate chainstate
        let mut boot_data = ChainStateBootData {
            initial_balances,
            post_flight_callback: None,
            first_burnchain_block_hash: burnchain_config.first_block_hash.clone(),
            first_burnchain_block_height: burnchain_config.first_block_height as u32,
            first_burnchain_block_timestamp: burnchain_config.first_block_timestamp,
            pox_constants: burnchain_config.pox_constants.clone(),
            // Bulk genesis data is loaded lazily via these callbacks, each of
            // which honors the test-genesis flag captured above.
            get_bulk_initial_lockups: Some(Box::new(move || {
                get_account_lockups(use_test_genesis_data)
            })),
            get_bulk_initial_balances: Some(Box::new(move || {
                get_account_balances(use_test_genesis_data)
            })),
            get_bulk_initial_namespaces: Some(Box::new(move || {
                get_namespaces(use_test_genesis_data)
            })),
            get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))),
        };

        info!("About to call open_and_exec");
        let (chain_state_db, receipts) = StacksChainState::open_and_exec(
            self.config.is_mainnet(),
            self.config.burnchain.chain_id,
            &self.config.get_chainstate_path_str(),
            Some(&mut boot_data),
            Some(self.config.node.get_marf_opts()),
        )
        .unwrap();
        // Push the boot receipts out to all registered event observers.
        run_loop::announce_boot_receipts(
            &mut self.event_dispatcher,
            &chain_state_db,
            &burnchain_config.pox_constants,
            &receipts,
        );
        chain_state_db
    }
284✔
664

665
    /// Instantiate the Stacks chain state and start the chains coordinator thread.
    /// Returns the coordinator thread handle.
    /// (Doc fix: an earlier version of this comment also promised the atlas
    /// attachment channel's receiving end, but only the handle is returned.)
    fn spawn_chains_coordinator(
        &mut self,
        burnchain_config: &Burnchain,
        coordinator_receivers: CoordinatorReceivers,
        miner_status: Arc<Mutex<MinerStatus>>,
    ) -> JoinHandle<()> {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis Atlas attachments
        let mut atlas_config = AtlasConfig::new(self.config.is_mainnet());
        let genesis_attachments = GenesisData::new(use_test_genesis_data)
            .read_name_zonefiles()
            .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec()))
            .collect();
        atlas_config.genesis_attachments = Some(genesis_attachments);

        let chain_state_db = self.boot_chainstate(burnchain_config);

        // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around
        let moved_atlas_config = self.config.atlas.clone();
        let moved_config = self.config.clone();
        let moved_burnchain_config = burnchain_config.clone();
        let coordinator_dispatcher = self.event_dispatcher.clone();
        let atlas_db = AtlasDB::connect(
            moved_atlas_config.clone(),
            &self.config.get_atlas_db_file_path(),
            true,
        )
        .expect("Failed to connect Atlas DB during startup");
        let coordinator_indexer =
            make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone()));

        // The coordinator gets its own named thread with an enlarged stack
        // (BLOCK_PROCESSOR_STACK_SIZE); all the `moved_*` clones above are
        // captured by this closure.
        let coordinator_thread_handle = thread::Builder::new()
            .name(format!(
                "chains-coordinator-{}",
                &moved_config.node.rpc_bind
            ))
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
            .spawn(move || {
                debug!(
                    "chains-coordinator thread ID is {:?}",
                    thread::current().id()
                );
                let mut cost_estimator = moved_config.make_cost_estimator();
                let mut fee_estimator = moved_config.make_fee_estimator();

                let coord_config = ChainsCoordinatorConfig {
                    txindex: moved_config.node.txindex,
                };
                // Runs until the coordinator channels shut down.
                ChainsCoordinator::run(
                    coord_config,
                    chain_state_db,
                    moved_burnchain_config,
                    &coordinator_dispatcher,
                    coordinator_receivers,
                    moved_atlas_config,
                    cost_estimator.as_deref_mut(),
                    fee_estimator.as_deref_mut(),
                    miner_status,
                    coordinator_indexer,
                    atlas_db,
                );
            })
            .expect("FATAL: failed to start chains coordinator thread");

        coordinator_thread_handle
    }
283✔
735

736
    /// Instantiate the PoX watchdog
737
    fn instantiate_pox_watchdog(&mut self) {
283✔
738
        let pox_watchdog = PoxSyncWatchdog::new(&self.config, self.pox_watchdog_comms.clone())
283✔
739
            .expect("FATAL: failed to instantiate PoX sync watchdog");
283✔
740
        self.pox_watchdog = Some(pox_watchdog);
283✔
741
    }
283✔
742

743
    /// Start Prometheus logging
744
    fn start_prometheus(&mut self) {
283✔
745
        let Some(prometheus_bind) = self.config.node.prometheus_bind.clone() else {
283✔
746
            return;
274✔
747
        };
748
        let monitoring_thread = thread::Builder::new()
9✔
749
            .name("prometheus".to_string())
9✔
750
            .spawn(move || {
9✔
751
                debug!("prometheus thread ID is {:?}", thread::current().id());
9✔
752
                start_serving_monitoring_metrics(prometheus_bind)
9✔
753
            })
9✔
754
            .expect("FATAL: failed to start monitoring thread");
9✔
755

756
        self.monitoring_thread.replace(monitoring_thread);
9✔
757
    }
283✔
758

759
    /// Hand ownership of the Prometheus monitoring thread's join handle (if
    /// one was started) to the caller, leaving `None` in its place.
    pub fn take_monitoring_thread(&mut self) -> Option<JoinHandle<Result<(), MonitoringError>>> {
        std::mem::take(&mut self.monitoring_thread)
    }
244✔
762

763
    /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the
764
    /// highest sortition.
765
    /// Returns (height at rc start, sortition)
766
    fn get_reward_cycle_sortition_db_height(
283✔
767
        sortdb: &SortitionDB,
283✔
768
        burnchain_config: &Burnchain,
283✔
769
    ) -> (u64, BlockSnapshot) {
283✔
770
        let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())
283✔
771
            .expect("BUG: failed to load canonical stacks chain tip hash");
283✔
772

773
        let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch)
283✔
774
            .expect("BUG: failed to query sortition DB")
283✔
775
        {
776
            Some(sn) => sn,
283✔
777
            None => {
UNCOV
778
                debug!("No canonical stacks chain tip hash present");
×
UNCOV
779
                let sn = SortitionDB::get_first_block_snapshot(sortdb.conn())
×
UNCOV
780
                    .expect("BUG: failed to get first-ever block snapshot");
×
UNCOV
781
                sn
×
782
            }
783
        };
784

785
        (
283✔
786
            burnchain_config.reward_cycle_to_block_height(
283✔
787
                burnchain_config
283✔
788
                    .block_height_to_reward_cycle(sn.block_height)
283✔
789
                    .expect("BUG: snapshot preceeds first reward cycle"),
283✔
790
            ),
283✔
791
            sn,
283✔
792
        )
283✔
793
    }
283✔
794

795
    /// Starts the node runloop.
    ///
    /// This function will block by looping infinitely.
    /// It will start the burnchain (separate thread), set-up a channel in
    /// charge of coordinating the new blocks coming from the burnchain and
    /// the nodes, taking turns on tenures.
    ///
    /// Returns `Option<Neon2NakaData>` so that data can be passed to `NakamotoNode`;
    /// `Some` on a clean shutdown, `None` if the burnchain failed to initialize
    /// or the relayer hung up.
    pub fn start(
        &mut self,
        burnchain_opt: Option<Burnchain>,
        mut mine_start: u64,
    ) -> Option<Neon2NakaData> {
        // The coordinator channels are consumed here, so a second call to
        // `start` on the same run loop panics by design.
        let (coordinator_receivers, coordinator_senders) = self
            .coordinator_channels
            .take()
            .expect("Run loop already started, can only start once after initialization.");

        Self::setup_termination_handler(self.should_keep_running.clone(), false);

        let burnchain_result = Self::instantiate_burnchain_state(
            &self.config,
            self.should_keep_running.clone(),
            burnchain_opt,
            coordinator_senders.clone(),
        );

        let mut burnchain = match burnchain_result {
            Ok(burnchain_controller) => burnchain_controller,
            Err(burnchain_error::ShutdownInitiated) => {
                // user-requested shutdown during burnchain bootstrap — not an error
                info!("Exiting stacks-node");
                return None;
            }
            Err(e) => {
                error!("Error initializing burnchain: {e}");
                info!("Exiting stacks-node");
                return None;
            }
        };

        let burnchain_config = burnchain.get_burnchain();
        self.burnchain = Some(burnchain_config.clone());

        // can we mine?
        let is_miner = self.check_is_miner(&mut burnchain);
        self.is_miner = Some(is_miner);

        // relayer linkup
        let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER);

        // set up globals so other subsystems can instantiate off of the runloop state.
        let globals = Globals::new(
            coordinator_senders,
            self.get_miner_status(),
            relay_send,
            self.counters.clone(),
            self.pox_watchdog_comms.clone(),
            self.should_keep_running.clone(),
            mine_start,
            LeaderKeyRegistrationState::default(),
        );
        self.set_globals(globals.clone());

        // have headers; boot up the chains coordinator and instantiate the chain state
        let coordinator_thread_handle = self.spawn_chains_coordinator(
            &burnchain_config,
            coordinator_receivers,
            globals.get_miner_status(),
        );
        self.instantiate_pox_watchdog();
        self.start_prometheus();

        // We announce a new burn block so that the chains coordinator
        // can resume prior work and handle eventual unprocessed sortitions
        // stored during a previous session.
        globals.coord().announce_new_burn_block();

        // Make sure at least one sortition has happened, and make sure it's globally available
        let sortdb = burnchain.sortdb_mut();
        let (rc_aligned_height, sn) =
            RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config);

        let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height {
            // need at least one sortition to happen.
            burnchain
                .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1)
                .expect("Unable to get burnchain tip")
                .block_snapshot
        } else {
            sn
        };

        globals.set_last_sortition(burnchain_tip_snapshot);

        // Boot up the p2p network and relayer, and figure out how many sortitions we have so far
        // (it could be non-zero if the node is resuming from chainstate)
        let mut node = StacksNode::spawn(self, globals.clone(), relay_recv);

        // Wait for all pending sortitions to process
        let burnchain_db = burnchain_config
            .open_burnchain_db(true)
            .expect("FATAL: failed to open burnchain DB");
        let burnchain_db_tip = burnchain_db
            .get_canonical_chain_tip()
            .expect("FATAL: failed to query burnchain DB");
        let mut burnchain_tip = burnchain
            .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height)
            .expect("Unable to get burnchain tip");

        // Start the runloop
        debug!("Runloop: Begin run loop");
        self.counters.bump_blocks_processed();

        let mut sortition_db_height = rc_aligned_height;
        let mut burnchain_height = sortition_db_height;

        // prepare to fetch the first reward cycle!
        debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}");

        let mut last_tenure_sortition_height = 0;

        // Outer loop: one pass per reward-cycle's worth of burnchain sync.
        loop {
            if !globals.keep_running() {
                // The p2p thread relies on the same atomic_bool, it will
                // discontinue its execution after completing its ongoing runloop epoch.
                info!("Terminating p2p process");
                info!("Terminating relayer");
                info!("Terminating chains-coordinator");

                globals.coord().stop_chains_coordinator();
                coordinator_thread_handle.join().unwrap();
                let peer_network = node.join();

                // Data that will be passed to Nakamoto run loop
                // Only gets transferred on clean shutdown of neon run loop
                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                info!("Exiting stacks-node");
                break Some(data_to_naka);
            }

            // `get_headers_height` reports one past the last header, hence the -1.
            let remote_chain_height = burnchain.get_headers_height() - 1;

            // wait until it's okay to process the next reward cycle's sortitions.
            let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait(
                &burnchain_config,
                &burnchain_tip,
                remote_chain_height,
            ) {
                Ok(x) => x,
                Err(e) => {
                    // aborted wait (e.g. shutdown signal) — re-check keep_running at loop top
                    debug!("Runloop: PoX sync wait routine aborted: {e:?}");
                    continue;
                }
            };

            // calculate burnchain sync percentage
            let percent: f64 = if remote_chain_height > 0 {
                burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64
            } else {
                0.0
            };

            // Download each burnchain block and process their sortitions.  This, in turn, will
            // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and
            // process them.  This loop runs for one reward cycle, so that the next pass of the
            // runloop will cause the PoX sync watchdog to wait until it believes that the node has
            // obtained all the Stacks blocks it can.
            debug!(
                "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})",
                burnchain_config
                    .block_height_to_reward_cycle(target_burnchain_block_height)
                    .expect("FATAL: target burnchain block height does not have a reward cycle");
                "total_burn_sync_percent" => %percent,
                "local_burn_height" => burnchain_tip.block_snapshot.block_height,
                "remote_tip_height" => remote_chain_height
            );

            // Inner loop: sync burnchain blocks until we reach the target height
            // (or the remote tip), processing each new sortition as it lands.
            loop {
                if !globals.keep_running() {
                    // defer the full shutdown sequence to the outer loop
                    break;
                }

                let (next_burnchain_tip, tip_burnchain_height) =
                    match burnchain.sync(Some(target_burnchain_block_height)) {
                        Ok(x) => x,
                        Err(e) => {
                            // retry; keep_running is re-checked at the top of the loop
                            warn!("Runloop: Burnchain controller stopped: {e}");
                            continue;
                        }
                    };

                // *now* we know the burnchain height
                burnchain_tip = next_burnchain_tip;
                burnchain_height = tip_burnchain_height;

                let sortition_tip = &burnchain_tip.block_snapshot.sortition_id;
                let next_sortition_height = burnchain_tip.block_snapshot.block_height;

                if next_sortition_height != last_tenure_sortition_height {
                    info!(
                        "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}"
                    );
                }

                if next_sortition_height > sortition_db_height {
                    debug!(
                        "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}"
                    );

                    debug!("Runloop: block mining until we process all sortitions");
                    signal_mining_blocked(globals.get_miner_status());

                    // first, let's process all blocks in (sortition_db_height, next_sortition_height]
                    for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) {
                        // stop mining so we can advance the sortition DB and so our
                        // ProcessTenure() directive (sent by relayer_sortition_notify() below)
                        // will be unblocked.

                        let block = {
                            let ic = burnchain.sortdb_ref().index_conn();
                            SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip)
                                .unwrap()
                                .expect(
                                    "Failed to find block in fork processed by burnchain indexer",
                                )
                        };

                        let sortition_id = &block.sortition_id;

                        // Have the node process the new block, that can include, or not, a sortition.
                        node.process_burnchain_state(
                            self.config(),
                            burnchain.sortdb_mut(),
                            sortition_id,
                            ibd,
                        );

                        // Now, tell the relayer to check if it won a sortition during this block,
                        // and, if so, to process and advertize the block.  This is basically a
                        // no-op during boot-up.
                        //
                        // _this will block if the relayer's buffer is full_
                        if !node.relayer_sortition_notify() {
                            // First check if we were supposed to cleanly exit
                            if !globals.keep_running() {
                                // The p2p thread relies on the same atomic_bool, it will
                                // discontinue its execution after completing its ongoing runloop epoch.
                                info!("Terminating p2p process");
                                info!("Terminating relayer");
                                info!("Terminating chains-coordinator");

                                globals.coord().stop_chains_coordinator();
                                coordinator_thread_handle.join().unwrap();
                                let peer_network = node.join();

                                // Data that will be passed to Nakamoto run loop
                                // Only gets transferred on clean shutdown of neon run loop
                                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                                info!("Exiting stacks-node");
                                return Some(data_to_naka);
                            }
                            // relayer hung up, exit.
                            error!("Runloop: Block relayer and miner hung up, exiting.");
                            return None;
                        }
                    }

                    debug!("Runloop: enable miner after processing sortitions");
                    signal_mining_ready(globals.get_miner_status());

                    debug!(
                        "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})"
                    );

                    sortition_db_height = next_sortition_height;
                } else if ibd {
                    // drive block processing after we reach the burnchain tip.
                    // we may have downloaded all the blocks already,
                    // so we can't rely on the relayer alone to
                    // drive it.
                    globals.coord().announce_new_stacks_block();
                }

                if burnchain_height >= target_burnchain_block_height
                    || burnchain_height >= remote_chain_height
                {
                    break;
                }
            }

            // Fully caught up and not in initial block download: consider mining.
            if sortition_db_height >= burnchain_height && !ibd {
                let canonical_stacks_tip_height =
                    SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn())
                        .map(|snapshot| snapshot.canonical_stacks_tip_height)
                        .unwrap_or(0);
                if canonical_stacks_tip_height < mine_start {
                    info!(
                        "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip"
                    );
                } else {
                    // once we've synced to the chain tip once, don't apply this check again.
                    //  this prevents a possible corner case in the event of a PoX fork.
                    mine_start = 0;
                    globals.set_start_mining_height_if_zero(sortition_db_height);

                    // at tip, and not downloading. proceed to mine.
                    if last_tenure_sortition_height != sortition_db_height {
                        if is_miner {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks"
                            );
                        } else {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}."
                            );
                        }
                        last_tenure_sortition_height = sortition_db_height;
                    }

                    if !node.relayer_issue_tenure(ibd) {
                        // First check if we were supposed to cleanly exit
                        if !globals.keep_running() {
                            // The p2p thread relies on the same atomic_bool, it will
                            // discontinue its execution after completing its ongoing runloop epoch.
                            info!("Terminating p2p process");
                            info!("Terminating relayer");
                            info!("Terminating chains-coordinator");

                            globals.coord().stop_chains_coordinator();
                            coordinator_thread_handle.join().unwrap();
                            let peer_network = node.join();

                            // Data that will be passed to Nakamoto run loop
                            // Only gets transferred on clean shutdown of neon run loop
                            let data_to_naka = Neon2NakaData::new(globals, peer_network);

                            info!("Exiting stacks-node");
                            return Some(data_to_naka);
                        }
                        // relayer hung up, exit.
                        error!("Runloop: Block relayer and miner hung up, exiting.");
                        break None;
                    }
                }
            }
        }
    }
285✔
1144
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc