• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 23943169302

03 Apr 2026 10:28AM UTC coverage: 77.573% (-8.1%) from 85.712%
23943169302

Pull #7076

github

7f2377
web-flow
Merge bb87ecec2 into c529ad924
Pull Request #7076: feat: sortition side-table copy and validation

3743 of 4318 new or added lines in 19 files covered. (86.68%)

19304 existing lines in 182 files now uncovered.

172097 of 221852 relevant lines covered (77.57%)

7722182.76 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.94
/stacks-node/src/run_loop/neon.rs
1
#[cfg(test)]
2
use std::sync::atomic::AtomicU64;
3
use std::sync::atomic::{AtomicBool, Ordering};
4
use std::sync::mpsc::sync_channel;
5
use std::sync::{Arc, Mutex};
6
use std::thread;
7
use std::thread::JoinHandle;
8

9
use libc;
10
use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType};
11
use stacks::burnchains::{Burnchain, Error as burnchain_error};
12
use stacks::chainstate::burn::db::sortdb::SortitionDB;
13
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
14
use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers};
15
use stacks::chainstate::coordinator::{
16
    migrate_chainstate_dbs, ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication,
17
    Error as coord_error,
18
};
19
use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
20
use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
21
use stacks::core::StacksEpochId;
22
use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment};
23
#[cfg(test)]
24
use stacks::util::tests::TestFlag;
25
use stacks::util_lib::db::Error as db_error;
26
use stacks_common::deps_common::ctrlc as termination;
27
use stacks_common::deps_common::ctrlc::SignalId;
28
use stacks_common::types::PublicKey;
29
use stacks_common::util::hash::Hash160;
30
use stx_genesis::GenesisData;
31

32
use super::RunLoopCallbacks;
33
use crate::burnchains::{make_bitcoin_indexer, Error};
34
use crate::globals::NeonGlobals as Globals;
35
use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError};
36
use crate::neon_node::{
37
    LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER,
38
};
39
use crate::node::{
40
    get_account_balances, get_account_lockups, get_names, get_namespaces,
41
    use_test_genesis_chainstate,
42
};
43
use crate::run_loop::boot_nakamoto::Neon2NakaData;
44
use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms};
45
use crate::{
46
    run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain,
47
};
48

49
/// POSIX file descriptor number for standard error; used by the
/// async-signal-safe writer `async_safe_write_stderr` below.
pub const STDERR: i32 = 2;
50

51
/// Test-observable cell for an arbitrary value shared between the run loop and tests.
/// In test builds it actually stores the value behind an `Arc<Mutex<_>>`.
#[cfg(test)]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub Arc<Mutex<T>>);

/// In non-test builds the field is a zero-sized placeholder: `PhantomData`
/// keeps the type parameter without storing anything.
#[cfg(not(test))]
#[derive(Clone, Default)]
pub struct RunLoopField<T>(pub std::marker::PhantomData<T>);
58

59
/// Test-observable event counter. In test builds it wraps a shared atomic
/// so tests can watch progress made by node threads.
#[cfg(test)]
#[derive(Clone)]
pub struct RunLoopCounter(pub Arc<AtomicU64>);

/// In non-test builds the counter is a zero-sized no-op.
#[cfg(not(test))]
#[derive(Clone)]
pub struct RunLoopCounter();
66

67
impl Default for RunLoopCounter {
    // Test builds: start the shared counter at zero.
    #[cfg(test)]
    fn default() -> Self {
        RunLoopCounter(Arc::new(AtomicU64::new(0)))
    }
    // Non-test builds: the counter is a unit struct, nothing to initialize.
    #[cfg(not(test))]
    fn default() -> Self {
        Self()
    }
}
77

78
impl<T: Clone> RunLoopField<Option<T>> {
    /// Clone out the stored value (test builds only).
    ///
    /// # Panics
    /// Panics if the mutex is poisoned, or if no value has been stored yet
    /// (the inner `Option` is still `None`).
    #[cfg(test)]
    pub fn get(&self) -> T {
        self.0.lock().unwrap().clone().unwrap()
    }
}
84

85
impl RunLoopCounter {
    /// Read the current counter value with `SeqCst` ordering (test builds only).
    #[cfg(test)]
    pub fn get(&self) -> u64 {
        self.0.load(Ordering::SeqCst)
    }

    /// Read the counter with a caller-chosen memory ordering (test builds only).
    #[cfg(test)]
    pub fn load(&self, ordering: Ordering) -> u64 {
        self.0.load(ordering)
    }
}
96

97
// Test builds only: let a `RunLoopCounter` be used directly as its inner
// `Arc<AtomicU64>` (e.g. `counter.fetch_add(1, ...)`) without unwrapping `.0`.
#[cfg(test)]
impl std::ops::Deref for RunLoopCounter {
    type Target = Arc<AtomicU64>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
105

106
/// Bundle of test-observable progress counters shared across run-loop
/// components. In non-test builds each counter compiles to a no-op
/// (see `RunLoopCounter` / `RunLoopField`).
#[derive(Clone, Default)]
pub struct Counters {
    // Neon (pre-Nakamoto) progress counters, bumped via the bump_* methods below.
    pub blocks_processed: RunLoopCounter,
    pub microblocks_processed: RunLoopCounter,
    pub missed_tenures: RunLoopCounter,
    pub missed_microblock_tenures: RunLoopCounter,
    pub cancelled_commits: RunLoopCounter,

    // bumped once per sortition via `bump_sortitions_processed`
    pub sortitions_processed: RunLoopCounter,

    pub naka_submitted_vrfs: RunLoopCounter,
    /// the number of submitted commits
    pub neon_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub neon_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_submitted_commits: RunLoopCounter,
    /// the burn block height when the last commit was submitted
    pub naka_submitted_commit_last_burn_height: RunLoopCounter,
    pub naka_mined_blocks: RunLoopCounter,
    pub naka_rejected_blocks: RunLoopCounter,
    pub naka_proposed_blocks: RunLoopCounter,
    pub naka_mined_tenures: RunLoopCounter,
    pub naka_signer_pushed_blocks: RunLoopCounter,
    pub naka_miner_directives: RunLoopCounter,
    // stacks tip height recorded alongside the last Nakamoto commit
    pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
    // sats spent by the last Nakamoto commit (see `bump_naka_submitted_commits`)
    pub naka_submitted_commit_last_commit_amount: RunLoopCounter,
    // parent tenure id recorded alongside the last Nakamoto commit
    pub naka_submitted_commit_last_parent_tenure_id: RunLoopField<Option<ConsensusHash>>,

    pub naka_miner_current_rejections: RunLoopCounter,
    pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,

    // Test-only flag; presumably asks the miner to skip issuing block-commit
    // ops when set — TODO(review) confirm against the miner's usage.
    #[cfg(test)]
    pub skip_commit_op: TestFlag<bool>,
}
140

141
impl Counters {
142
    pub fn new() -> Self {
1✔
143
        Self::default()
1✔
144
    }
1✔
145

146
    #[cfg(test)]
UNCOV
147
    fn inc(ctr: &RunLoopCounter) {
×
UNCOV
148
        ctr.0.fetch_add(1, Ordering::SeqCst);
×
UNCOV
149
    }
×
150

151
    #[cfg(not(test))]
152
    fn inc(_ctr: &RunLoopCounter) {}
153

154
    #[cfg(test)]
UNCOV
155
    fn set(ctr: &RunLoopCounter, value: u64) {
×
UNCOV
156
        ctr.0.store(value, Ordering::SeqCst);
×
UNCOV
157
    }
×
158

159
    #[cfg(not(test))]
160
    fn set(_ctr: &RunLoopCounter, _value: u64) {}
161

162
    #[cfg(test)]
UNCOV
163
    fn update<T: Clone>(ctr: &RunLoopField<Option<T>>, value: &T) {
×
UNCOV
164
        let mut mutex = ctr.0.lock().expect("FATAL: test counter mutext poisoned");
×
UNCOV
165
        let _ = mutex.replace(value.clone());
×
UNCOV
166
    }
×
167

168
    #[cfg(not(test))]
169
    fn update<T: Clone>(_ctr: &RunLoopField<Option<T>>, _value: &T) {}
170

UNCOV
171
    pub fn bump_blocks_processed(&self) {
×
UNCOV
172
        Counters::inc(&self.blocks_processed);
×
UNCOV
173
    }
×
174

UNCOV
175
    pub fn bump_sortitions_processed(&self) {
×
UNCOV
176
        Counters::inc(&self.sortitions_processed);
×
UNCOV
177
    }
×
178

179
    pub fn bump_microblocks_processed(&self) {
×
180
        Counters::inc(&self.microblocks_processed);
×
181
    }
×
182

UNCOV
183
    pub fn bump_missed_tenures(&self) {
×
UNCOV
184
        Counters::inc(&self.missed_tenures);
×
UNCOV
185
    }
×
186

187
    pub fn bump_missed_microblock_tenures(&self) {
×
188
        Counters::inc(&self.missed_microblock_tenures);
×
189
    }
×
190

191
    pub fn bump_cancelled_commits(&self) {
×
192
        Counters::inc(&self.cancelled_commits);
×
193
    }
×
194

UNCOV
195
    pub fn bump_neon_submitted_commits(&self, committed_burn_height: u64) {
×
UNCOV
196
        Counters::inc(&self.neon_submitted_commits);
×
UNCOV
197
        Counters::set(
×
UNCOV
198
            &self.neon_submitted_commit_last_burn_height,
×
UNCOV
199
            committed_burn_height,
×
200
        );
UNCOV
201
    }
×
202

UNCOV
203
    pub fn bump_naka_submitted_vrfs(&self) {
×
UNCOV
204
        Counters::inc(&self.naka_submitted_vrfs);
×
UNCOV
205
    }
×
206

UNCOV
207
    pub fn bump_naka_submitted_commits(
×
UNCOV
208
        &self,
×
UNCOV
209
        committed_burn_height: u64,
×
UNCOV
210
        committed_stacks_height: u64,
×
UNCOV
211
        committed_sats_amount: u64,
×
UNCOV
212
        committed_parent_tenure_id: &ConsensusHash,
×
UNCOV
213
    ) {
×
UNCOV
214
        Counters::inc(&self.naka_submitted_commits);
×
UNCOV
215
        Counters::set(
×
UNCOV
216
            &self.naka_submitted_commit_last_burn_height,
×
UNCOV
217
            committed_burn_height,
×
218
        );
UNCOV
219
        Counters::set(
×
UNCOV
220
            &self.naka_submitted_commit_last_stacks_tip,
×
UNCOV
221
            committed_stacks_height,
×
222
        );
UNCOV
223
        Counters::set(
×
UNCOV
224
            &self.naka_submitted_commit_last_commit_amount,
×
UNCOV
225
            committed_sats_amount,
×
226
        );
UNCOV
227
        Counters::update(
×
UNCOV
228
            &self.naka_submitted_commit_last_parent_tenure_id,
×
UNCOV
229
            committed_parent_tenure_id,
×
230
        );
UNCOV
231
    }
×
232

UNCOV
233
    pub fn bump_naka_mined_blocks(&self) {
×
UNCOV
234
        Counters::inc(&self.naka_mined_blocks);
×
UNCOV
235
    }
×
236

UNCOV
237
    pub fn bump_naka_proposed_blocks(&self) {
×
UNCOV
238
        Counters::inc(&self.naka_proposed_blocks);
×
UNCOV
239
    }
×
240

UNCOV
241
    pub fn bump_naka_rejected_blocks(&self) {
×
UNCOV
242
        Counters::inc(&self.naka_rejected_blocks);
×
UNCOV
243
    }
×
244

UNCOV
245
    pub fn bump_naka_signer_pushed_blocks(&self) {
×
UNCOV
246
        Counters::inc(&self.naka_signer_pushed_blocks);
×
UNCOV
247
    }
×
248

UNCOV
249
    pub fn bump_naka_mined_tenures(&self) {
×
UNCOV
250
        Counters::inc(&self.naka_mined_tenures);
×
UNCOV
251
    }
×
252

UNCOV
253
    pub fn bump_naka_miner_directives(&self) {
×
UNCOV
254
        Counters::inc(&self.naka_miner_directives);
×
UNCOV
255
    }
×
256

UNCOV
257
    pub fn set_microblocks_processed(&self, value: u64) {
×
UNCOV
258
        Counters::set(&self.microblocks_processed, value)
×
UNCOV
259
    }
×
260

UNCOV
261
    pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) {
×
UNCOV
262
        Counters::set(&self.naka_miner_current_rejections_timeout_secs, value)
×
UNCOV
263
    }
×
264

UNCOV
265
    pub fn set_miner_current_rejections(&self, value: u32) {
×
UNCOV
266
        Counters::set(&self.naka_miner_current_rejections, u64::from(value))
×
UNCOV
267
    }
×
268
}
269

270
/// Coordinating a node running in neon mode.
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    // set via `set_globals` once the run loop starts; `None` until then
    globals: Option<Globals>,
    // test-observable progress counters (no-ops in non-test builds)
    counters: Counters,
    // both halves of the coordinator channel pair, until handed off
    coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>,
    // stored `false` by the termination handler to request graceful shutdown
    should_keep_running: Arc<AtomicBool>,
    event_dispatcher: EventDispatcher,
    pox_watchdog: Option<PoxSyncWatchdog>, // can't be instantiated until .start() is called
    is_miner: Option<bool>,                // not known until .start() is called
    burnchain: Option<Burnchain>,          // not known until .start() is called
    pox_watchdog_comms: PoxSyncWatchdogComms,
    /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is
    /// instantiated (namely, so the test framework can access it).
    miner_status: Arc<Mutex<MinerStatus>>,
    // join handle for the Prometheus metrics thread, if one was started
    monitoring_thread: Option<JoinHandle<Result<(), MonitoringError>>>,
}
288

289
/// Write to stderr in an async-safe manner.
/// See signal-safety(7)
///
/// Called from signal-handler context, so it must only use async-signal-safe
/// functions: raw `write(2)`, not Rust's buffered/locking `eprintln!`.
/// Short writes and errors are deliberately ignored — there is nothing safe
/// to do about them in a signal handler.
fn async_safe_write_stderr(msg: &str) {
    #[cfg(windows)]
    unsafe {
        // write(2) inexplicably has a different ABI only on Windows.
        // SAFETY: `msg` is a valid &str, so the pointer/length pair describes
        // readable memory for the duration of the call.
        libc::write(
            STDERR,
            msg.as_ptr() as *const libc::c_void,
            msg.len() as u32,
        );
    }
    #[cfg(not(windows))]
    unsafe {
        // SAFETY: as above — pointer and length come from a live &str.
        libc::write(STDERR, msg.as_ptr() as *const libc::c_void, msg.len());
    }
}
×
306

307
impl RunLoop {
308
    /// Sets up a runloop and node, given a config.
    ///
    /// Instantiates the coordinator channel pair, the shutdown flag, the PoX
    /// sync watchdog comms, the shared miner status, and the event dispatcher.
    /// The PoX watchdog, burnchain, and miner flag stay unset until `.start()`.
    pub fn new(config: Config) -> Self {
        let channels = CoordinatorCommunication::instantiate();
        let should_keep_running = Arc::new(AtomicBool::new(true));
        let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone());
        let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(
            config.burnchain.burn_fee_cap,
        )));

        // Register every configured observer first, then let the dispatcher
        // process any payloads still pending from a previous run.
        let mut event_dispatcher = EventDispatcher::new(config.get_working_dir());
        for observer in config.events_observers.iter() {
            event_dispatcher.register_observer(observer);
        }
        event_dispatcher.process_pending_payloads();

        Self {
            config,
            globals: None,
            coordinator_channels: Some(channels),
            callbacks: RunLoopCallbacks::new(),
            counters: Counters::default(),
            should_keep_running,
            event_dispatcher,
            pox_watchdog: None,
            is_miner: None,
            burnchain: None,
            pox_watchdog_comms,
            miner_status,
            monitoring_thread: None,
        }
    }
×
339

UNCOV
340
    pub fn get_globals(&self) -> Globals {
×
UNCOV
341
        self.globals
×
UNCOV
342
            .clone()
×
UNCOV
343
            .expect("FATAL: globals not instantiated")
×
UNCOV
344
    }
×
345

UNCOV
346
    fn set_globals(&mut self, globals: Globals) {
×
UNCOV
347
        self.globals = Some(globals);
×
UNCOV
348
    }
×
349

UNCOV
350
    pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> {
×
UNCOV
351
        self.coordinator_channels.as_ref().map(|x| x.1.clone())
×
UNCOV
352
    }
×
353

UNCOV
354
    pub fn get_blocks_processed_arc(&self) -> RunLoopCounter {
×
UNCOV
355
        self.counters.blocks_processed.clone()
×
UNCOV
356
    }
×
357

358
    pub fn get_microblocks_processed_arc(&self) -> RunLoopCounter {
×
359
        self.counters.microblocks_processed.clone()
×
360
    }
×
361

UNCOV
362
    pub fn get_missed_tenures_arc(&self) -> RunLoopCounter {
×
UNCOV
363
        self.counters.missed_tenures.clone()
×
UNCOV
364
    }
×
365

366
    pub fn get_missed_microblock_tenures_arc(&self) -> RunLoopCounter {
×
367
        self.counters.missed_microblock_tenures.clone()
×
368
    }
×
369

370
    pub fn get_cancelled_commits_arc(&self) -> RunLoopCounter {
×
371
        self.counters.cancelled_commits.clone()
×
372
    }
×
373

UNCOV
374
    pub fn get_counters(&self) -> Counters {
×
UNCOV
375
        self.counters.clone()
×
UNCOV
376
    }
×
377

UNCOV
378
    pub fn config(&self) -> &Config {
×
UNCOV
379
        &self.config
×
UNCOV
380
    }
×
381

UNCOV
382
    pub fn get_event_dispatcher(&self) -> EventDispatcher {
×
UNCOV
383
        self.event_dispatcher.clone()
×
UNCOV
384
    }
×
385

UNCOV
386
    pub fn is_miner(&self) -> bool {
×
UNCOV
387
        self.is_miner.unwrap_or(false)
×
UNCOV
388
    }
×
389

UNCOV
390
    pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms {
×
UNCOV
391
        self.pox_watchdog_comms.clone()
×
UNCOV
392
    }
×
393

UNCOV
394
    pub fn get_termination_switch(&self) -> Arc<AtomicBool> {
×
UNCOV
395
        self.should_keep_running.clone()
×
UNCOV
396
    }
×
397

UNCOV
398
    pub fn get_burnchain(&self) -> Burnchain {
×
UNCOV
399
        self.burnchain
×
UNCOV
400
            .clone()
×
UNCOV
401
            .expect("FATAL: tried to get runloop burnchain before calling .start()")
×
UNCOV
402
    }
×
403

UNCOV
404
    pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog {
×
UNCOV
405
        self.pox_watchdog
×
UNCOV
406
            .as_mut()
×
UNCOV
407
            .expect("FATAL: tried to get PoX watchdog before calling .start()")
×
UNCOV
408
    }
×
409

UNCOV
410
    pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> {
×
UNCOV
411
        self.miner_status.clone()
×
UNCOV
412
    }
×
413

414
    /// Set up termination handler.  Have a signal set the `should_keep_running` atomic bool to
    /// false.  Panics if called more than once (unless `allow_err` is set or
    /// running under `cfg!(test)`).
    pub fn setup_termination_handler(keep_running_writer: Arc<AtomicBool>, allow_err: bool) {
        let install = termination::set_handler(move |sig_id| match sig_id {
            SignalId::Bus => {
                // SIGBUS means memory corruption of some kind — don't attempt a
                // graceful shutdown; abort so a core dump is produced.
                let msg = "Caught SIGBUS; crashing immediately and dumping core\n";
                async_safe_write_stderr(msg);
                unsafe {
                    libc::abort();
                }
            }
            _ => {
                // Any other handled signal requests a graceful shutdown: tell the
                // run loop to stop at the end of its current cycle.
                let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n");
                async_safe_write_stderr(&msg);
                keep_running_writer.store(false, Ordering::SeqCst);
            }
        });

        if let Err(e) = install {
            // integration tests can do this
            if cfg!(test) || allow_err {
                info!("Error setting up signal handler, may have already been set");
            } else {
                panic!("FATAL: error setting termination handler - {e}");
            }
        }
    }
×
441

442
    /// Seconds to wait before retrying UTXO check during startup
    const UTXO_RETRY_INTERVAL: u64 = 10;
    /// Number of times to retry UTXO check during startup
    /// (so up to ~60 seconds of waiting in total with the interval above)
    const UTXO_RETRY_COUNT: u64 = 6;
446

447
    /// Determine if we're the miner.
    /// If there's a network error, then assume that we're not a miner.
    ///
    /// A configured miner must prove it has spendable UTXOs for its signing
    /// key; the check is retried a few times to tolerate a slow-starting
    /// bitcoind.
    ///
    /// # Panics
    /// Panics if the node is configured as a (non-mock) miner but no UTXOs
    /// are found after all retries.
    fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool {
        if self.config.node.miner {
            // If we are mock mining, then we don't need to check for UTXOs and
            // we can just return true.
            if self.config.get_node_config(false).mock_mining {
                return true;
            }
            let keychain = Keychain::default(self.config.node.seed.clone());
            let mut op_signer = keychain.generate_op_signer();
            // best-effort: a failure here is logged but not fatal
            if let Err(e) = burnchain.create_wallet_if_dne() {
                warn!("Error when creating wallet: {e:?}");
            }
            // Candidate addresses to scan for UTXOs, keyed by the epoch whose
            // address format they use.
            let mut btc_addrs = vec![(
                StacksEpochId::Epoch2_05,
                // legacy
                BitcoinAddress::from_bytes_legacy(
                    self.config.burnchain.get_bitcoin_network().1,
                    LegacyBitcoinAddressType::PublicKeyHash,
                    &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0,
                )
                .expect("FATAL: failed to construct legacy bitcoin address"),
            )];
            if self.config.miner.segwit {
                btc_addrs.push((
                    StacksEpochId::Epoch21,
                    // segwit p2wpkh
                    BitcoinAddress::from_bytes_segwit_p2wpkh(
                        self.config.burnchain.get_bitcoin_network().1,
                        &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0,
                    )
                    .expect("FATAL: failed to construct segwit p2wpkh address"),
                ));
            }

            // retry UTXO check a few times, in case bitcoind is still starting up
            for _ in 0..Self::UTXO_RETRY_COUNT {
                for (epoch_id, btc_addr) in &btc_addrs {
                    info!("Miner node: checking UTXOs at address: {btc_addr}");
                    let utxos =
                        burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0);
                    if utxos.is_none() {
                        warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)");
                    } else {
                        // one spendable UTXO at any candidate address is enough
                        info!("UTXOs found - will run as a Miner node");
                        return true;
                    }
                }
                thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL));
            }
            panic!("No UTXOs found, exiting");
        } else {
            info!("Will run as a Follower node");
            false
        }
    }
×
504

505
    /// Instantiate the burnchain client and databases.
    /// Fetches headers and instantiates the burnchain.
    /// Panics on failure.
    ///
    /// Returns the started `BitcoinRegtestController`, or
    /// `burnchain_error::ShutdownInitiated` if a shutdown was requested while
    /// the burnchain was still syncing.
    pub fn instantiate_burnchain_state(
        config: &Config,
        should_keep_running: Arc<AtomicBool>,
        burnchain_opt: Option<Burnchain>,
        coordinator_senders: CoordinatorChannels,
    ) -> Result<BitcoinRegtestController, burnchain_error> {
        // Initialize and start the burnchain.
        let mut burnchain_controller = BitcoinRegtestController::with_burnchain(
            config.clone(),
            Some(coordinator_senders),
            burnchain_opt,
            Some(should_keep_running.clone()),
        );

        let burnchain = burnchain_controller.get_burnchain();
        let epochs = burnchain_controller.get_stacks_epochs();

        // sanity check -- epoch data must be valid
        Config::assert_valid_epoch_settings(&burnchain, &epochs);

        // Upgrade chainstate databases if they exist already
        // NOTE: this has to be done before the subsequent call to
        // `burnchain_controller.connect_dbs()` below!
        match migrate_chainstate_dbs(
            &epochs,
            &burnchain,
            &config.get_burn_db_file_path(),
            &config.get_chainstate_path_str(),
            Some(config.node.get_marf_opts()),
        ) {
            Ok(_) => {}
            Err(coord_error::DBError(db_error::TooOldForEpoch)) => {
                error!(
                    "FATAL: chainstate database(s) are not compatible with the current system epoch"
                );
                panic!();
            }
            Err(e) => {
                panic!("FATAL: unable to query filesystem or databases: {e:?}");
            }
        }

        info!("Start syncing Bitcoin headers, feel free to grab a cup of coffee, this can take a while");

        // Decide how far to sync: one past the highest block we already have,
        // or one past the configured first block if the DB is empty.
        let burnchain_config = burnchain_controller.get_burnchain();
        let target_burnchain_block_height = match burnchain_config
            .get_highest_burnchain_block()
            .expect("FATAL: failed to access burnchain database")
        {
            Some(burnchain_tip) => {
                // database exists already, and has blocks -- just sync to its tip.
                let target_height = burnchain_tip.block_height + 1;
                debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height);
                target_height
            }
            None => {
                // database does not exist yet
                let target_height = 1.max(burnchain_config.first_block_height + 1);
                debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}");
                target_height
            }
        };

        burnchain_controller
            .start(Some(target_burnchain_block_height))
            .map_err(|e| {
                // A closed coordinator during an intentional shutdown is not an
                // error — surface it as ShutdownInitiated instead of panicking.
                if matches!(e, Error::CoordinatorClosed)
                    && !should_keep_running.load(Ordering::SeqCst)
                {
                    info!("Shutdown initiated during burnchain initialization: {e}");
                    return burnchain_error::ShutdownInitiated;
                }
                error!("Burnchain controller stopped: {e}");
                panic!();
            })?;

        // if the chainstate DBs don't exist, this will instantiate them
        if let Err(e) = burnchain_controller.connect_dbs() {
            error!("Failed to connect to burnchain databases: {e}");
            panic!();
        };

        // TODO (hack) instantiate the sortdb in the burnchain
        let _ = burnchain_controller.sortdb_mut();
        Ok(burnchain_controller)
    }
×
594

595
    /// Boot up the stacks chainstate.
596
    /// Instantiate the chainstate and push out the boot receipts to observers
597
    /// This is only public so we can test it.
UNCOV
598
    pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState {
×
UNCOV
599
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);
×
600

601
        // load up genesis balances
UNCOV
602
        let initial_balances = self
×
UNCOV
603
            .config
×
UNCOV
604
            .initial_balances
×
UNCOV
605
            .iter()
×
UNCOV
606
            .map(|e| (e.address.clone(), e.amount))
×
UNCOV
607
            .collect();
×
608

609
        // instantiate chainstate
UNCOV
610
        let mut boot_data = ChainStateBootData {
×
UNCOV
611
            initial_balances,
×
UNCOV
612
            post_flight_callback: None,
×
UNCOV
613
            first_burnchain_block_hash: burnchain_config.first_block_hash.clone(),
×
UNCOV
614
            first_burnchain_block_height: burnchain_config.first_block_height as u32,
×
UNCOV
615
            first_burnchain_block_timestamp: burnchain_config.first_block_timestamp,
×
UNCOV
616
            pox_constants: burnchain_config.pox_constants.clone(),
×
UNCOV
617
            get_bulk_initial_lockups: Some(Box::new(move || {
×
UNCOV
618
                get_account_lockups(use_test_genesis_data)
×
UNCOV
619
            })),
×
UNCOV
620
            get_bulk_initial_balances: Some(Box::new(move || {
×
UNCOV
621
                get_account_balances(use_test_genesis_data)
×
UNCOV
622
            })),
×
UNCOV
623
            get_bulk_initial_namespaces: Some(Box::new(move || {
×
UNCOV
624
                get_namespaces(use_test_genesis_data)
×
UNCOV
625
            })),
×
UNCOV
626
            get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))),
×
627
        };
628

UNCOV
629
        info!("About to call open_and_exec");
×
UNCOV
630
        let (chain_state_db, receipts) = StacksChainState::open_and_exec(
×
UNCOV
631
            self.config.is_mainnet(),
×
UNCOV
632
            self.config.burnchain.chain_id,
×
UNCOV
633
            &self.config.get_chainstate_path_str(),
×
UNCOV
634
            Some(&mut boot_data),
×
UNCOV
635
            Some(self.config.node.get_marf_opts()),
×
UNCOV
636
        )
×
UNCOV
637
        .unwrap();
×
UNCOV
638
        run_loop::announce_boot_receipts(
×
UNCOV
639
            &mut self.event_dispatcher,
×
UNCOV
640
            &chain_state_db,
×
UNCOV
641
            &burnchain_config.pox_constants,
×
UNCOV
642
            &receipts,
×
643
        );
UNCOV
644
        chain_state_db
×
UNCOV
645
    }
×
646

647
    /// Instantiate the Stacks chain state and start the chains coordinator thread.
    /// Returns the coordinator thread handle.
    /// (NOTE(review): an earlier comment also promised "the receiving end of
    /// the coordinator's atlas attachment channel", but only the `JoinHandle`
    /// is returned.)
    fn spawn_chains_coordinator(
        &mut self,
        burnchain_config: &Burnchain,
        coordinator_receivers: CoordinatorReceivers,
        miner_status: Arc<Mutex<MinerStatus>>,
    ) -> JoinHandle<()> {
        let use_test_genesis_data = use_test_genesis_chainstate(&self.config);

        // load up genesis Atlas attachments
        let mut atlas_config = AtlasConfig::new(self.config.is_mainnet());
        let genesis_attachments = GenesisData::new(use_test_genesis_data)
            .read_name_zonefiles()
            .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec()))
            .collect();
        atlas_config.genesis_attachments = Some(genesis_attachments);

        let chain_state_db = self.boot_chainstate(burnchain_config);

        // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around
        // Everything moved into the thread closure below is cloned here first.
        let moved_atlas_config = self.config.atlas.clone();
        let moved_config = self.config.clone();
        let moved_burnchain_config = burnchain_config.clone();
        let coordinator_dispatcher = self.event_dispatcher.clone();
        let atlas_db = AtlasDB::connect(
            moved_atlas_config.clone(),
            &self.config.get_atlas_db_file_path(),
            true,
        )
        .expect("Failed to connect Atlas DB during startup");
        let coordinator_indexer =
            make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone()));

        // Block processing needs a large stack, hence the explicit stack_size.
        let coordinator_thread_handle = thread::Builder::new()
            .name(format!(
                "chains-coordinator-{}",
                &moved_config.node.rpc_bind
            ))
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
            .spawn(move || {
                debug!(
                    "chains-coordinator thread ID is {:?}",
                    thread::current().id()
                );
                let mut cost_estimator = moved_config.make_cost_estimator();
                let mut fee_estimator = moved_config.make_fee_estimator();

                let coord_config = ChainsCoordinatorConfig {
                    txindex: moved_config.node.txindex,
                };
                // runs until the coordinator is told to stop
                ChainsCoordinator::run(
                    coord_config,
                    chain_state_db,
                    moved_burnchain_config,
                    &coordinator_dispatcher,
                    coordinator_receivers,
                    moved_atlas_config,
                    cost_estimator.as_deref_mut(),
                    fee_estimator.as_deref_mut(),
                    miner_status,
                    coordinator_indexer,
                    atlas_db,
                );
            })
            .expect("FATAL: failed to start chains coordinator thread");

        coordinator_thread_handle
    }
×
717

718
    /// Instantiate the PoX watchdog
UNCOV
719
    fn instantiate_pox_watchdog(&mut self) {
×
UNCOV
720
        let pox_watchdog = PoxSyncWatchdog::new(&self.config, self.pox_watchdog_comms.clone())
×
UNCOV
721
            .expect("FATAL: failed to instantiate PoX sync watchdog");
×
UNCOV
722
        self.pox_watchdog = Some(pox_watchdog);
×
UNCOV
723
    }
×
724

725
    /// Start Prometheus logging
UNCOV
726
    fn start_prometheus(&mut self) {
×
UNCOV
727
        let Some(prometheus_bind) = self.config.node.prometheus_bind.clone() else {
×
UNCOV
728
            return;
×
729
        };
UNCOV
730
        let monitoring_thread = thread::Builder::new()
×
UNCOV
731
            .name("prometheus".to_string())
×
UNCOV
732
            .spawn(move || {
×
UNCOV
733
                debug!("prometheus thread ID is {:?}", thread::current().id());
×
UNCOV
734
                start_serving_monitoring_metrics(prometheus_bind)
×
UNCOV
735
            })
×
UNCOV
736
            .expect("FATAL: failed to start monitoring thread");
×
737

UNCOV
738
        self.monitoring_thread.replace(monitoring_thread);
×
UNCOV
739
    }
×
740

UNCOV
741
    /// Relinquish ownership of the Prometheus monitoring thread's join handle,
    /// if one was started, leaving `None` in its place.
    pub fn take_monitoring_thread(&mut self) -> Option<JoinHandle<Result<(), MonitoringError>>> {
        // Swap the slot for its default (None) and hand back whatever was there.
        std::mem::take(&mut self.monitoring_thread)
    }
×
744

745
    /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the
746
    /// highest sortition.
747
    /// Returns (height at rc start, sortition)
UNCOV
748
    fn get_reward_cycle_sortition_db_height(
×
UNCOV
749
        sortdb: &SortitionDB,
×
UNCOV
750
        burnchain_config: &Burnchain,
×
UNCOV
751
    ) -> (u64, BlockSnapshot) {
×
UNCOV
752
        let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())
×
UNCOV
753
            .expect("BUG: failed to load canonical stacks chain tip hash");
×
754

UNCOV
755
        let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch)
×
UNCOV
756
            .expect("BUG: failed to query sortition DB")
×
757
        {
UNCOV
758
            Some(sn) => sn,
×
759
            None => {
760
                debug!("No canonical stacks chain tip hash present");
×
761
                let sn = SortitionDB::get_first_block_snapshot(sortdb.conn())
×
762
                    .expect("BUG: failed to get first-ever block snapshot");
×
763
                sn
×
764
            }
765
        };
766

UNCOV
767
        (
×
UNCOV
768
            burnchain_config.reward_cycle_to_block_height(
×
UNCOV
769
                burnchain_config
×
UNCOV
770
                    .block_height_to_reward_cycle(sn.block_height)
×
UNCOV
771
                    .expect("BUG: snapshot preceeds first reward cycle"),
×
UNCOV
772
            ),
×
UNCOV
773
            sn,
×
UNCOV
774
        )
×
UNCOV
775
    }
×
776

777
    /// Starts the node runloop.
    ///
    /// This function will block by looping infinitely.
    /// It will start the burnchain (separate thread), set-up a channel in
    /// charge of coordinating the new blocks coming from the burnchain and
    /// the nodes, taking turns on tenures.
    ///
    /// `mine_start` is the Stacks chain height this node must sync to before it
    /// starts issuing tenures; the check is disarmed (set to 0) once reached.
    ///
    /// Returns `Some(Neon2NakaData)` on clean shutdown so that data can be
    /// passed to the Nakamoto run loop, or `None` if startup failed or the
    /// relayer/miner hung up.
    pub fn start(
        &mut self,
        burnchain_opt: Option<Burnchain>,
        mut mine_start: u64,
    ) -> Option<Neon2NakaData> {
        // The coordinator channels are consumed here; calling start() a second
        // time would panic on this expect.
        let (coordinator_receivers, coordinator_senders) = self
            .coordinator_channels
            .take()
            .expect("Run loop already started, can only start once after initialization.");

        // Install the termination handler that clears should_keep_running.
        Self::setup_termination_handler(self.should_keep_running.clone(), false);

        let burnchain_result = Self::instantiate_burnchain_state(
            &self.config,
            self.should_keep_running.clone(),
            burnchain_opt,
            coordinator_senders.clone(),
        );

        let mut burnchain = match burnchain_result {
            Ok(burnchain_controller) => burnchain_controller,
            Err(burnchain_error::ShutdownInitiated) => {
                // user-requested shutdown during burnchain bootstrap; not an error
                info!("Exiting stacks-node");
                return None;
            }
            Err(e) => {
                error!("Error initializing burnchain: {e}");
                info!("Exiting stacks-node");
                return None;
            }
        };

        let burnchain_config = burnchain.get_burnchain();
        self.burnchain = Some(burnchain_config.clone());

        // can we mine?
        let is_miner = self.check_is_miner(&mut burnchain);
        self.is_miner = Some(is_miner);

        // relayer linkup
        let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER);

        // set up globals so other subsystems can instantiate off of the runloop state.
        let globals = Globals::new(
            coordinator_senders,
            self.get_miner_status(),
            relay_send,
            self.counters.clone(),
            self.pox_watchdog_comms.clone(),
            self.should_keep_running.clone(),
            mine_start,
            LeaderKeyRegistrationState::default(),
        );
        self.set_globals(globals.clone());

        // have headers; boot up the chains coordinator and instantiate the chain state
        let coordinator_thread_handle = self.spawn_chains_coordinator(
            &burnchain_config,
            coordinator_receivers,
            globals.get_miner_status(),
        );
        self.instantiate_pox_watchdog();
        self.start_prometheus();

        // We announce a new burn block so that the chains coordinator
        // can resume prior work and handle eventual unprocessed sortitions
        // stored during a previous session.
        globals.coord().announce_new_burn_block();

        // Make sure at least one sortition has happened, and make sure it's globally available
        let sortdb = burnchain.sortdb_mut();
        let (rc_aligned_height, sn) =
            RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config);

        let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height {
            // need at least one sortition to happen.
            burnchain
                .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1)
                .expect("Unable to get burnchain tip")
                .block_snapshot
        } else {
            sn
        };

        globals.set_last_sortition(burnchain_tip_snapshot);

        // Boot up the p2p network and relayer, and figure out how many sortitions we have so far
        // (it could be non-zero if the node is resuming from chainstate)
        let mut node = StacksNode::spawn(self, globals.clone(), relay_recv);

        // Wait for all pending sortitions to process
        let burnchain_db = burnchain_config
            .open_burnchain_db(true)
            .expect("FATAL: failed to open burnchain DB");
        let burnchain_db_tip = burnchain_db
            .get_canonical_chain_tip()
            .expect("FATAL: failed to query burnchain DB");
        let mut burnchain_tip = burnchain
            .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height)
            .expect("Unable to get burnchain tip");

        // Start the runloop
        debug!("Runloop: Begin run loop");
        self.counters.bump_blocks_processed();

        // sortition_db_height: highest sortition processed so far;
        // burnchain_height: highest burnchain block downloaded so far.
        // Both start at the reward-cycle-aligned height computed above.
        let mut sortition_db_height = rc_aligned_height;
        let mut burnchain_height = sortition_db_height;

        // prepare to fetch the first reward cycle!
        debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}");

        let mut last_tenure_sortition_height = 0;

        // Outer loop: one pass per reward cycle, paced by the PoX sync watchdog.
        loop {
            if !globals.keep_running() {
                // The p2p thread relies on the same atomic_bool, it will
                // discontinue its execution after completing its ongoing runloop epoch.
                info!("Terminating p2p process");
                info!("Terminating relayer");
                info!("Terminating chains-coordinator");

                globals.coord().stop_chains_coordinator();
                coordinator_thread_handle.join().unwrap();
                let peer_network = node.join();

                // Data that will be passed to Nakamoto run loop
                // Only gets transferred on clean shutdown of neon run loop
                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                info!("Exiting stacks-node");
                break Some(data_to_naka);
            }

            let remote_chain_height = burnchain.get_headers_height() - 1;

            // wait until it's okay to process the next reward cycle's sortitions.
            let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait(
                &burnchain_config,
                &burnchain_tip,
                remote_chain_height,
            ) {
                Ok(x) => x,
                Err(e) => {
                    debug!("Runloop: PoX sync wait routine aborted: {e:?}");
                    continue;
                }
            };

            // calculate burnchain sync percentage
            let percent: f64 = if remote_chain_height > 0 {
                burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64
            } else {
                0.0
            };

            // Download each burnchain block and process their sortitions.  This, in turn, will
            // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and
            // process them.  This loop runs for one reward cycle, so that the next pass of the
            // runloop will cause the PoX sync watchdog to wait until it believes that the node has
            // obtained all the Stacks blocks it can.
            debug!(
                "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})",
                burnchain_config
                    .block_height_to_reward_cycle(target_burnchain_block_height)
                    .expect("FATAL: target burnchain block height does not have a reward cycle");
                "total_burn_sync_percent" => %percent,
                "local_burn_height" => burnchain_tip.block_snapshot.block_height,
                "remote_tip_height" => remote_chain_height
            );

            // Inner loop: sync burnchain blocks and process their sortitions
            // until we reach this cycle's target height (or the remote tip).
            loop {
                if !globals.keep_running() {
                    break;
                }

                let (next_burnchain_tip, tip_burnchain_height) =
                    match burnchain.sync(Some(target_burnchain_block_height)) {
                        Ok(x) => x,
                        Err(e) => {
                            // NOTE(review): retries immediately on error; presumably
                            // burnchain.sync() blocks/backs off internally — confirm.
                            warn!("Runloop: Burnchain controller stopped: {e}");
                            continue;
                        }
                    };

                // *now* we know the burnchain height
                burnchain_tip = next_burnchain_tip;
                burnchain_height = tip_burnchain_height;

                let sortition_tip = &burnchain_tip.block_snapshot.sortition_id;
                let next_sortition_height = burnchain_tip.block_snapshot.block_height;

                if next_sortition_height != last_tenure_sortition_height {
                    info!(
                        "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}"
                    );
                }

                if next_sortition_height > sortition_db_height {
                    debug!(
                        "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}"
                    );

                    debug!("Runloop: block mining until we process all sortitions");
                    signal_mining_blocked(globals.get_miner_status());

                    // first, let's process all blocks in (sortition_db_height, next_sortition_height]
                    for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) {
                        // stop mining so we can advance the sortition DB and so our
                        // ProcessTenure() directive (sent by relayer_sortition_notify() below)
                        // will be unblocked.

                        let block = {
                            let ic = burnchain.sortdb_ref().index_conn();
                            SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip)
                                .unwrap()
                                .expect(
                                    "Failed to find block in fork processed by burnchain indexer",
                                )
                        };

                        let sortition_id = &block.sortition_id;

                        // Have the node process the new block, that can include, or not, a sortition.
                        node.process_burnchain_state(
                            self.config(),
                            burnchain.sortdb_mut(),
                            sortition_id,
                            ibd,
                        );

                        // Now, tell the relayer to check if it won a sortition during this block,
                        // and, if so, to process and advertize the block.  This is basically a
                        // no-op during boot-up.
                        //
                        // _this will block if the relayer's buffer is full_
                        if !node.relayer_sortition_notify() {
                            // First check if we were supposed to cleanly exit
                            if !globals.keep_running() {
                                // The p2p thread relies on the same atomic_bool, it will
                                // discontinue its execution after completing its ongoing runloop epoch.
                                info!("Terminating p2p process");
                                info!("Terminating relayer");
                                info!("Terminating chains-coordinator");

                                globals.coord().stop_chains_coordinator();
                                coordinator_thread_handle.join().unwrap();
                                let peer_network = node.join();

                                // Data that will be passed to Nakamoto run loop
                                // Only gets transferred on clean shutdown of neon run loop
                                let data_to_naka = Neon2NakaData::new(globals, peer_network);

                                info!("Exiting stacks-node");
                                return Some(data_to_naka);
                            }
                            // relayer hung up, exit.
                            error!("Runloop: Block relayer and miner hung up, exiting.");
                            return None;
                        }
                    }

                    debug!("Runloop: enable miner after processing sortitions");
                    signal_mining_ready(globals.get_miner_status());

                    debug!(
                        "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})"
                    );

                    sortition_db_height = next_sortition_height;
                } else if ibd {
                    // drive block processing after we reach the burnchain tip.
                    // we may have downloaded all the blocks already,
                    // so we can't rely on the relayer alone to
                    // drive it.
                    globals.coord().announce_new_stacks_block();
                }

                if burnchain_height >= target_burnchain_block_height
                    || burnchain_height >= remote_chain_height
                {
                    break;
                }
            }

            // Fully caught up to the burnchain and not in initial block
            // download: consider issuing a tenure (i.e. mining).
            if sortition_db_height >= burnchain_height && !ibd {
                let canonical_stacks_tip_height =
                    SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn())
                        .map(|snapshot| snapshot.canonical_stacks_tip_height)
                        .unwrap_or(0);
                if canonical_stacks_tip_height < mine_start {
                    info!(
                        "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip"
                    );
                } else {
                    // once we've synced to the chain tip once, don't apply this check again.
                    //  this prevents a possible corner case in the event of a PoX fork.
                    mine_start = 0;
                    globals.set_start_mining_height_if_zero(sortition_db_height);

                    // at tip, and not downloading. proceed to mine.
                    if last_tenure_sortition_height != sortition_db_height {
                        if is_miner {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks"
                            );
                        } else {
                            info!(
                                "Runloop: Synchronized full burnchain up to height {sortition_db_height}."
                            );
                        }
                        last_tenure_sortition_height = sortition_db_height;
                    }

                    if !node.relayer_issue_tenure(ibd) {
                        // First check if we were supposed to cleanly exit
                        if !globals.keep_running() {
                            // The p2p thread relies on the same atomic_bool, it will
                            // discontinue its execution after completing its ongoing runloop epoch.
                            info!("Terminating p2p process");
                            info!("Terminating relayer");
                            info!("Terminating chains-coordinator");

                            globals.coord().stop_chains_coordinator();
                            coordinator_thread_handle.join().unwrap();
                            let peer_network = node.join();

                            // Data that will be passed to Nakamoto run loop
                            // Only gets transferred on clean shutdown of neon run loop
                            let data_to_naka = Neon2NakaData::new(globals, peer_network);

                            info!("Exiting stacks-node");
                            return Some(data_to_naka);
                        }
                        // relayer hung up, exit.
                        error!("Runloop: Block relayer and miner hung up, exiting.");
                        break None;
                    }
                }
            }
        }
    }
×
1126
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc