• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 25903914664-1

15 May 2026 06:28AM UTC coverage: 47.122% (-38.8%) from 85.959%
25903914664-1

Pull #7199

github

94e391
web-flow
Merge 109f2828c into 1c7b8e6ac
Pull Request #7199: Feat: L1 and L2 early unlocks, updating signer

103343 of 219309 relevant lines covered (47.12%)

12880462.62 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

72.91
/stacks-node/src/nakamoto_node/relayer.rs
1
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
2
// Copyright (C) 2020-2023 Stacks Open Internet Foundation
3
//
4
// This program is free software: you can redistribute it and/or modify
5
// it under the terms of the GNU General Public License as published by
6
// the Free Software Foundation, either version 3 of the License, or
7
// (at your option) any later version.
8
//
9
// This program is distributed in the hope that it will be useful,
10
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
// GNU General Public License for more details.
13
//
14
// You should have received a copy of the GNU General Public License
15
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
16
use core::fmt;
17
use std::io::Read;
18
use std::sync::atomic::{AtomicBool, Ordering};
19
use std::sync::mpsc::{Receiver, RecvTimeoutError};
20
use std::sync::Arc;
21
#[cfg(test)]
22
use std::sync::LazyLock;
23
use std::thread::JoinHandle;
24
use std::time::{Duration, Instant};
25
use std::{fs, thread};
26

27
use rand::{thread_rng, Rng};
28
use stacks::burnchains::{Burnchain, Txid};
29
use stacks::chainstate::burn::db::sortdb::{FindIter, SortitionDB};
30
use stacks::chainstate::burn::operations::leader_block_commit::{
31
    RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS,
32
};
33
use stacks::chainstate::burn::operations::{
34
    BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp,
35
};
36
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
37
use stacks::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients;
38
use stacks::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState};
39
use stacks::chainstate::stacks::address::PoxAddress;
40
use stacks::chainstate::stacks::db::StacksChainState;
41
use stacks::chainstate::stacks::miner::{
42
    set_mining_spend_amount, signal_mining_blocked, signal_mining_ready,
43
};
44
use stacks::chainstate::stacks::Error as ChainstateError;
45
use stacks::config::BurnchainConfig;
46
use stacks::core::mempool::MemPoolDB;
47
use stacks::core::STACKS_EPOCH_LATEST_MARKER;
48
use stacks::monitoring::increment_stx_blocks_mined_counter;
49
use stacks::net::db::LocalPeer;
50
use stacks::net::p2p::NetworkHandle;
51
use stacks::net::relay::Relayer;
52
use stacks::net::NetworkResult;
53
use stacks::util_lib::db::Error as DbError;
54
use stacks_common::types::chainstate::{
55
    BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed,
56
};
57
use stacks_common::types::StacksEpochId;
58
use stacks_common::util::get_epoch_time_ms;
59
use stacks_common::util::hash::Hash160;
60
#[cfg(test)]
61
use stacks_common::util::tests::TestFlag;
62
use stacks_common::util::vrf::VRFPublicKey;
63

64
use super::miner::MinerReason;
65
use super::{
66
    Config, Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE,
67
};
68
use crate::burnchains::BurnchainController;
69
use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective};
70
use crate::neon_node::{
71
    fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState,
72
};
73
use crate::run_loop::nakamoto::{Globals, RunLoop};
74
use crate::run_loop::RegisteredKey;
75
use crate::BitcoinRegtestController;
76

77
#[cfg(test)]
/// Test flag to stall the relayer thread right before it creates a miner thread.
pub static TEST_MINER_THREAD_STALL: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default);

#[cfg(test)]
/// Test flag to stall the miner thread right after it starts up (does not block the relayer thread)
pub static TEST_MINER_THREAD_START_STALL: LazyLock<TestFlag<bool>> =
    LazyLock::new(TestFlag::default);

#[cfg(test)]
/// Test flag to set the tip (tenure consensus hash, block hash) for the miner to commit to
pub static TEST_MINER_COMMIT_TIP: LazyLock<TestFlag<Option<(ConsensusHash, BlockHeaderHash)>>> =
    LazyLock::new(TestFlag::default);
90

91
/// Command types for the Nakamoto relayer thread, issued to it by other threads
#[allow(clippy::large_enum_variant)]
pub enum RelayerDirective {
    /// Handle some new data that arrived on the network (such as blocks, transactions, and
    /// attachments), storing and relaying it as appropriate
    HandleNetResult(NetworkResult),
    /// A new burn block has been processed by the SortitionDB, check if this miner won sortition,
    ///  and if so, start the miner thread
    ProcessedBurnBlock(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash),
    /// Either a new burn block has been processed (without a miner active yet) or a
    ///  nakamoto tenure's first block has been processed, so the relayer should issue
    ///  a block commit
    IssueBlockCommit(ConsensusHash, BlockHeaderHash),
    /// Try to register a VRF public key
    RegisterKey(BlockSnapshot),
    /// Stop the relayer thread
    Exit,
}
108

109
impl fmt::Display for RelayerDirective {
110
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
×
111
        match self {
×
112
            RelayerDirective::HandleNetResult(_) => write!(f, "HandleNetResult"),
×
113
            RelayerDirective::ProcessedBurnBlock(_, _, _) => write!(f, "ProcessedBurnBlock"),
×
114
            RelayerDirective::IssueBlockCommit(_, _) => write!(f, "IssueBlockCommit"),
×
115
            RelayerDirective::RegisterKey(_) => write!(f, "RegisterKey"),
×
116
            RelayerDirective::Exit => write!(f, "Exit"),
×
117
        }
118
    }
×
119
}
120

121
/// Last commitment data
/// This represents the tenure that the last-sent block-commit committed to.
pub struct LastCommit {
    /// block-commit sent
    block_commit: LeaderBlockCommitOp,
    /// the sortition tip at the time the block-commit was sent
    burn_tip: BlockSnapshot,
    /// the stacks tip at the time the block-commit was sent
    stacks_tip: StacksBlockId,
    /// the tenure consensus hash for the tip's tenure
    tenure_consensus_hash: ConsensusHash,
    /// the start-block hash of the tip's tenure
    #[allow(dead_code)]
    start_block_hash: BlockHeaderHash,
    /// What is the epoch in which this was sent?
    epoch_id: StacksEpochId,
    /// commit txid; `None` until filled in on submission via `set_txid`
    txid: Option<Txid>,
}
140

141
/// Timer used to check whether or not a burnchain view change has
///  waited long enough to issue a burn commit without a tenure change
enum BurnBlockCommitTimer {
    /// The timer hasn't been set: we aren't currently waiting to submit a commit
    NotSet,
    /// The timer is set, and has been set for a particular burn view
    Set {
        /// when the timer was (re)started
        start_time: Instant,
        /// This is the canonical sortition at the time that the
        ///  timer began. This is used to make sure we aren't reusing
        ///  the timeout between sortitions
        burn_tip: ConsensusHash,
    },
}
155

156
impl BurnBlockCommitTimer {
157
    /// Check if the timer has expired (and was set).
158
    /// If the timer was not set, then set it.
159
    ///
160
    /// Returns true if the timer expired
161
    fn is_ready(&mut self, current_burn_tip: &ConsensusHash, timeout: &Duration) -> bool {
45,083✔
162
        let needs_reset = match self {
45,083✔
163
            BurnBlockCommitTimer::NotSet => true,
222✔
164
            BurnBlockCommitTimer::Set {
165
                start_time,
44,861✔
166
                burn_tip,
44,861✔
167
            } => {
168
                if burn_tip != current_burn_tip {
44,861✔
169
                    true
1,113✔
170
                } else {
171
                    if start_time.elapsed() > *timeout {
43,748✔
172
                        // timer expired and was pointed at the correct burn tip
173
                        // so we can just return is_ready here
174
                        return true;
7,573✔
175
                    }
36,175✔
176
                    // timer didn't expire, but the burn tip was correct, so
177
                    //  we don't need to reset the timer
178
                    false
36,175✔
179
                }
180
            }
181
        };
182
        if needs_reset {
37,510✔
183
            info!(
1,335✔
184
                "Starting new tenure timeout";
185
                "timeout_secs" => timeout.as_secs(),
1,335✔
186
                "burn_tip_ch" => %current_burn_tip
187
            );
188
            *self = Self::Set {
1,335✔
189
                burn_tip: current_burn_tip.clone(),
1,335✔
190
                start_time: Instant::now(),
1,335✔
191
            };
1,335✔
192
        }
36,175✔
193

194
        debug!(
37,510✔
195
            "Waiting for tenure timeout before issuing commit";
196
            "elapsed_secs" => self.elapsed_secs(),
×
197
            "burn_tip_ch" => %current_burn_tip
198
        );
199

200
        false
37,510✔
201
    }
45,083✔
202

203
    /// At what time, if set, would this timer be ready?
204
    fn deadline(&self, timeout: &Duration) -> Option<Instant> {
37,510✔
205
        match self {
37,510✔
206
            BurnBlockCommitTimer::NotSet => None,
×
207
            BurnBlockCommitTimer::Set { start_time, .. } => Some(*start_time + *timeout),
37,510✔
208
        }
209
    }
37,510✔
210

211
    /// How much time has elapsed on the current timer?
212
    fn elapsed_secs(&self) -> u64 {
×
213
        match self {
×
214
            BurnBlockCommitTimer::NotSet => 0,
×
215
            BurnBlockCommitTimer::Set { start_time, .. } => start_time.elapsed().as_secs(),
×
216
        }
217
    }
×
218
}
219

220
impl LastCommit {
    /// Construct a new `LastCommit` record for a just-built block-commit.
    /// The submission `txid` is left unset until `set_txid` is called.
    pub fn new(
        commit: LeaderBlockCommitOp,
        burn_tip: BlockSnapshot,
        stacks_tip: StacksBlockId,
        tenure_consensus_hash: ConsensusHash,
        start_block_hash: BlockHeaderHash,
        epoch_id: StacksEpochId,
    ) -> Self {
        Self {
            block_commit: commit,
            burn_tip,
            stacks_tip,
            tenure_consensus_hash,
            start_block_hash,
            epoch_id,
            txid: None,
        }
    }

    /// Get the block-commit operation itself
    pub fn get_block_commit(&self) -> &LeaderBlockCommitOp {
        &self.block_commit
    }

    /// What's the parent tenure's tenure-start block hash?
    pub fn parent_tenure_id(&self) -> StacksBlockId {
        StacksBlockId(self.block_commit.block_header_hash.0)
    }

    /// What's the stacks tip at the time of commit?
    pub fn get_stacks_tip(&self) -> &StacksBlockId {
        &self.stacks_tip
    }

    /// What's the burn tip at the time of commit?
    pub fn get_burn_tip(&self) -> &BlockSnapshot {
        &self.burn_tip
    }

    /// What's the epoch in which this was sent?
    pub fn get_epoch_id(&self) -> &StacksEpochId {
        &self.epoch_id
    }

    /// Get the tenure ID of the tenure this commit builds on
    pub fn get_tenure_id(&self) -> &ConsensusHash {
        &self.tenure_consensus_hash
    }

    /// Record the transaction ID under which this block-commit was submitted
    pub fn set_txid(&mut self, txid: &Txid) {
        self.txid = Some(txid.clone());
    }
}
275

276
/// Join handle for the miner thread: `Ok(())` on clean exit, `NakamotoNodeError` otherwise.
pub type MinerThreadJoinHandle = JoinHandle<Result<(), NakamotoNodeError>>;

/// Miner thread join handle, as well as an "abort" flag to force the miner thread to exit when it
/// is blocked.
pub struct MinerStopHandle {
    /// The join handle itself
    join_handle: MinerThreadJoinHandle,
    /// The relayer-set abort flag
    abort_flag: Arc<AtomicBool>,
}
286

287
impl MinerStopHandle {
    /// Wrap a miner thread's join handle together with the shared abort flag
    /// the relayer uses to force the thread to exit.
    pub fn new(join_handle: MinerThreadJoinHandle, abort_flag: Arc<AtomicBool>) -> Self {
        Self {
            join_handle,
            abort_flag,
        }
    }

    /// Get a ref to the inner thread object
    pub fn inner_thread(&self) -> &std::thread::Thread {
        self.join_handle.thread()
    }

    /// Destroy this stop handle to get the thread join handle
    pub fn into_inner(self) -> MinerThreadJoinHandle {
        self.join_handle
    }

    /// Stop the inner miner thread.
    /// Blocks the miner, and sets the abort flag so that a blocked miner will error out.
    /// Consumes the handle and joins the miner thread before returning.
    /// Returns `Err` only if the join itself fails; a miner thread that exited with an
    /// error is tolerated and merely logged at debug level.
    pub fn stop(self, globals: &Globals) -> Result<(), NakamotoNodeError> {
        let my_id = thread::current().id();
        let prior_thread_id = self.inner_thread().id();
        debug!(
            "[Thread {:?}]: Stopping prior miner thread ID {:?}",
            &my_id, &prior_thread_id
        );

        // raise the abort flag before blocking the miner, so a blocked miner
        // errors out instead of hanging (see struct-level docs)
        self.abort_flag.store(true, Ordering::SeqCst);
        globals.block_miner();

        let prior_miner = self.into_inner();
        let prior_miner_result = prior_miner.join().map_err(|_| {
            error!("Miner: failed to join prior miner");
            ChainstateError::MinerAborted
        })?;
        debug!("Stopped prior miner thread ID {:?}", &prior_thread_id);
        if let Err(e) = prior_miner_result {
            // it's okay if the prior miner thread exited with an error.
            // in many cases this is expected (i.e., a burnchain block occurred)
            // if some error condition should be handled though, this is the place
            //  to do that handling.
            debug!("Prior mining thread exited with: {e:?}");
        }

        globals.unblock_miner();
        Ok(())
    }
}
336

337
/// The reason for issuing a tenure extend
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum TenureExtendReason {
    /// There was an empty sortition (no winning block-commit)
    EmptySortition,
    /// There was a bad sortition winner
    BadSortitionWinner,
    /// We are waiting for the current winner to produce a block.
    UnresponsiveWinner,
}
347

348
/// Information necessary to determine when to extend a tenure
#[derive(Clone)]
pub struct TenureExtendTime {
    /// The time at which we determined that we should tenure-extend
    time: Instant,
    /// The amount of time we should wait before tenure-extending
    /// (zero means extend effectively immediately; see `TenureExtendTime::immediate`)
    timeout: Duration,
    /// The reason for tenure-extending
    reason: TenureExtendReason,
}
358

359
impl TenureExtendTime {
    /// Create a new `TenureExtendTime` for an UnresponsiveWinner with the specified `timeout`
    pub fn unresponsive_winner(timeout: Duration) -> Self {
        Self {
            time: Instant::now(),
            timeout,
            reason: TenureExtendReason::UnresponsiveWinner,
        }
    }

    /// Create a new `TenureExtendTime` with the provided `reason` and no `timeout`
    /// (a zero timeout, so `should_extend` reports true effectively immediately)
    pub fn immediate(reason: TenureExtendReason) -> Self {
        Self {
            time: Instant::now(),
            timeout: Duration::from_millis(0),
            reason,
        }
    }

    /// Should we attempt to tenure-extend?
    pub fn should_extend(&self) -> bool {
        // We set the time, but have we waited long enough?
        self.time.elapsed() > self.timeout
    }

    /// Amount of time elapsed since we decided to tenure-extend
    pub fn elapsed(&self) -> Duration {
        self.time.elapsed()
    }

    /// The timeout specified when we decided to tenure-extend
    pub fn timeout(&self) -> Duration {
        self.timeout
    }

    /// The reason for tenure-extending
    pub fn reason(&self) -> &TenureExtendReason {
        &self.reason
    }

    /// Update the timeout for this `TenureExtendTime` and reset the time
    pub fn refresh(&mut self, timeout: Duration) {
        self.timeout = timeout;
        self.time = Instant::now();
    }
}
405

406
/// Relayer thread
/// * accepts network results and stores blocks and microblocks
/// * forwards new blocks, microblocks, and transactions to the p2p thread
/// * issues (and re-issues) block commits to participate as a miner
/// * processes burnchain state to determine if selected as a miner
/// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread)
pub struct RelayerThread {
    /// Node config
    pub(crate) config: Config,
    /// Handle to the sortition DB
    sortdb: SortitionDB,
    /// Handle to the chainstate DB
    chainstate: StacksChainState,
    /// Handle to the mempool DB
    mempool: MemPoolDB,
    /// Handle to global state and inter-thread communication channels
    pub(crate) globals: Globals,
    /// Authoritative copy of the keychain state
    pub(crate) keychain: Keychain,
    /// Burnchain configuration
    pub(crate) burnchain: Burnchain,
    /// height of last VRF key registration request
    last_vrf_key_burn_height: Option<u64>,
    /// client to the burnchain (used only for sending block-commits)
    pub(crate) bitcoin_controller: BitcoinRegtestController,
    /// client to the event dispatcher
    pub(crate) event_dispatcher: EventDispatcher,
    /// copy of the local peer state
    local_peer: LocalPeer,
    /// last observed burnchain block height from the p2p thread (obtained from network results)
    last_network_block_height: u64,
    /// time at which we observed a change in the network block height (epoch time in millis)
    last_network_block_height_ts: u128,
    /// last observed number of downloader state-machine passes from the p2p thread (obtained from
    /// network results)
    last_network_download_passes: u64,
    /// last observed number of inventory state-machine passes from the p2p thread (obtained from
    /// network results)
    last_network_inv_passes: u64,
    /// minimum number of downloader state-machine passes that must take place before mining (this
    /// is used to ensure that the p2p thread attempts to download new Stacks block data before
    /// this thread tries to mine a block)
    min_network_download_passes: u64,
    /// minimum number of inventory state-machine passes that must take place before mining (this
    /// is used to ensure that the p2p thread attempts to download new Stacks block data before
    /// this thread tries to mine a block)
    min_network_inv_passes: u64,

    /// Inner relayer instance for forwarding broadcasted data back to the p2p thread for dispatch
    /// to neighbors
    relayer: Relayer,

    /// handle to the subordinate miner thread
    miner_thread: Option<MinerStopHandle>,
    /// miner thread's burn view
    miner_thread_burn_view: Option<BlockSnapshot>,

    /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up
    ///  to check if it should issue a block commit or try to register a VRF key
    next_initiative: Instant,
    /// whether this node is a miner (from `RunLoop::is_miner`)
    is_miner: bool,
    /// Information about the last-sent block commit, and the relayer's view of the chain at the
    /// time it was sent.
    last_committed: Option<LastCommit>,
    /// Timeout for waiting for the first block in a tenure before submitting a block commit
    new_tenure_timeout: BurnBlockCommitTimer,
    /// Time to wait before attempting a tenure extend
    tenure_extend_time: Option<TenureExtendTime>,
}
475

476
impl RelayerThread {
477
    /// Instantiate relayer thread.
    /// Uses `runloop` to obtain globals, config, and `is_miner` status
    ///
    /// Opens its own handles to the sortition DB, chainstate DB, and mempool;
    /// panics (via `expect`) if any of these databases cannot be opened.
    pub fn new(
        runloop: &RunLoop,
        local_peer: LocalPeer,
        relayer: Relayer,
        keychain: Keychain,
    ) -> RelayerThread {
        let config = runloop.config().clone();
        let globals = runloop.get_globals();
        let burn_db_path = config.get_burn_db_file_path();
        let is_miner = runloop.is_miner();

        let sortdb = SortitionDB::open(
            &burn_db_path,
            true,
            runloop.get_burnchain().pox_constants,
            Some(config.node.get_marf_opts()),
        )
        .expect("FATAL: failed to open burnchain DB");

        let chainstate =
            open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB");

        let mempool = config
            .connect_mempool_db()
            .expect("Database failure opening mempool");

        // dummy controller: used only for sending block-commits (see `bitcoin_controller` field docs)
        let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone());

        let next_initiative_delay = config.node.next_initiative_delay;

        RelayerThread {
            config,
            sortdb,
            chainstate,
            mempool,
            globals,
            keychain,
            burnchain: runloop.get_burnchain(),
            last_vrf_key_burn_height: None,
            bitcoin_controller,
            event_dispatcher: runloop.get_event_dispatcher(),
            local_peer,

            last_network_block_height: 0,
            last_network_block_height_ts: 0,
            last_network_download_passes: 0,
            min_network_download_passes: 0,
            last_network_inv_passes: 0,
            min_network_inv_passes: 0,

            relayer,

            miner_thread: None,
            miner_thread_burn_view: None,
            is_miner,
            next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay),
            last_committed: None,
            new_tenure_timeout: BurnBlockCommitTimer::NotSet,
            tenure_extend_time: None,
        }
    }
241✔
540

541
    /// Get a handle to the p2p thread (via the inner `Relayer`)
    pub fn get_p2p_handle(&self) -> NetworkHandle {
        self.relayer.get_p2p_handle()
    }
1,405✔
545

546
    /// have we waited for the right conditions under which to start mining a block off of our
547
    /// chain tip?
548
    fn has_waited_for_latest_blocks(&self) -> bool {
282,985✔
549
        // a network download pass took place
550
        self.min_network_download_passes <= self.last_network_download_passes
282,985✔
551
        // we waited long enough for a download pass, but timed out waiting
552
        || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms()
257,043✔
553
        // we're not supposed to wait at all
554
        || !self.config.miner.wait_for_block_download
10,920✔
555
    }
282,985✔
556

557
    /// Handle a NetworkResult from the p2p/http state machine.  Usually this is the act of
    /// * preprocessing and storing new blocks and microblocks
    /// * relaying blocks, microblocks, and transactions
    /// * updating unconfirmed state views
    ///
    /// Also blocks mining while newly-arrived block data is being processed, and
    /// unblocks it once the requisite download passes have completed.
    pub fn process_network_result(&mut self, mut net_result: NetworkResult) {
        debug!(
            "Relayer: Handle network result (from {})",
            net_result.burn_height
        );

        if self.last_network_block_height != net_result.burn_height {
            // burnchain advanced; disable mining until we also do a download pass.
            self.last_network_block_height = net_result.burn_height;
            self.min_network_download_passes = net_result.num_download_passes + 1;
            self.min_network_inv_passes = net_result.num_inv_sync_passes + 1;
            self.last_network_block_height_ts = get_epoch_time_ms();
        }

        // store and relay the data; panics on failure since this indicates a bug
        let net_receipts = self
            .relayer
            .process_network_result(
                &self.local_peer,
                &mut net_result,
                &self.burnchain,
                &mut self.sortdb,
                &mut self.chainstate,
                &mut self.mempool,
                self.globals.sync_comms.get_ibd(),
                Some(&self.globals.coord_comms),
                Some(&self.event_dispatcher),
            )
            .expect("BUG: failure processing network results");

        if net_receipts.num_new_blocks > 0 {
            // if we received any new block data that could invalidate our view of the chain tip,
            // then stop mining until we process it
            debug!("Relayer: block mining to process newly-arrived blocks or microblocks");
            signal_mining_blocked(self.globals.get_miner_status());
        }

        // announce any newly-arrived mempool transactions to the event observers
        let mempool_txs_added = net_receipts.mempool_txs_added.len();
        if mempool_txs_added > 0 {
            self.event_dispatcher
                .process_new_mempool_txs(net_receipts.mempool_txs_added);
        }

        // Dispatch retrieved attachments, if any.
        if net_result.has_attachments() {
            self.event_dispatcher
                .process_new_attachments(&net_result.attachments);
        }

        // resume mining if we blocked it, and if we've done the requisite download
        // passes
        self.last_network_download_passes = net_result.num_download_passes;
        self.last_network_inv_passes = net_result.num_inv_sync_passes;
        if self.has_waited_for_latest_blocks() {
            debug!("Relayer: did a download pass, so unblocking mining");
            signal_mining_ready(self.globals.get_miner_status());
        }
    }
282,985✔
618

619
    /// Choose a miner directive for a sortition with a winner.
620
    ///
621
    /// The decision process is a little tricky, because the right decision depends on:
622
    /// * whether or not we won the _given_ sortition (`sn`)
623
    /// * whether or not we won the sortition that started the ongoing Stacks tenure
624
    /// * whether or not the ongoing Stacks tenure is at or descended from the last-winning
625
    /// sortition
626
    ///
627
    /// Specifically:
628
    ///
629
    /// If we won the given sortition `sn`, then we can start mining immediately with a `BlockFound`
630
    /// tenure-change. The exception is if we won the sortition, but the sortition's winning commit
631
    /// does not commit to the ongoing tenure. In this case, we instead extend the current tenure.
632
    ///
633
    /// Otherwise, if we did not win `sn`, if we won the tenure which started the ongoing Stacks tenure
634
    /// (i.e. we're the active miner), then we _may_ start mining after a timeout _if_ the winning
635
    /// miner (not us) fails to submit a `BlockFound` tenure-change block for `sn`.
636
    fn choose_directive_sortition_with_winner(
1,444✔
637
        &mut self,
1,444✔
638
        sn: BlockSnapshot,
1,444✔
639
        mining_pkh: &Hash160,
1,444✔
640
        committed_index_hash: StacksBlockId,
1,444✔
641
    ) -> MinerDirective {
1,444✔
642
        let won_sortition = sn.miner_pk_hash.as_ref() == Some(mining_pkh);
1,444✔
643

644
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
1,444✔
645
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
1,444✔
646
                .expect("FATAL: failed to query sortition DB for stacks tip");
1,444✔
647
        let canonical_stacks_snapshot =
1,444✔
648
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
1,444✔
649
                .expect("FATAL: failed to query sortiiton DB for epoch")
1,444✔
650
                .expect("FATAL: no sortition for canonical stacks tip");
1,444✔
651

652
        // If we won the sortition, ensure that the sortition's winning commit actually commits to
653
        // the ongoing tenure. If it does not (i.e. commit is "stale" and points to N-1 when we are
654
        // currently in N), and if we are also the ongoing tenure's miner, then we must not attempt
655
        // a tenure change (which would reorg our own signed blocks). Instead, we should immediately
656
        // extend the tenure.
657
        if won_sortition && !self.config.get_node_config(false).mock_mining {
1,444✔
658
            let canonical_stacks_tip =
1,229✔
659
                StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
1,229✔
660

661
            let commits_to_tip_tenure = Self::sortition_commits_to_stacks_tip_tenure(
1,229✔
662
                &mut self.chainstate,
1,229✔
663
                &canonical_stacks_tip,
1,229✔
664
                &canonical_stacks_snapshot,
1,229✔
665
                &sn,
1,229✔
666
            ).unwrap_or_else(|e| {
1,229✔
667
                warn!(
×
668
                    "Relayer: Failed to determine if winning sortition commits to current tenure: {e:?}";
669
                    "sortition_ch" => %sn.consensus_hash,
670
                    "stacks_tip_ch" => %canonical_stacks_tip_ch
671
                );
672
                false
×
673
            });
×
674

675
            if !commits_to_tip_tenure {
1,229✔
676
                let won_ongoing_tenure_sortition =
14✔
677
                    canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh);
14✔
678

679
                if won_ongoing_tenure_sortition {
14✔
680
                    info!(
4✔
681
                        "Relayer: Won sortition, but commit does not target ongoing tenure. Will extend instead of starting a new tenure.";
682
                        "winning_sortition" => %sn.consensus_hash,
683
                        "ongoing_tenure" => %canonical_stacks_snapshot.consensus_hash,
684
                        "commits_to_tip_tenure?" => commits_to_tip_tenure
4✔
685
                    );
686
                    // Extend tenure to the new burn view instead of attempting BlockFound
687
                    return MinerDirective::ContinueTenure {
4✔
688
                        new_burn_view: sn.consensus_hash,
4✔
689
                    };
4✔
690
                }
10✔
691
            }
1,215✔
692
        }
215✔
693

694
        if won_sortition || self.config.get_node_config(false).mock_mining {
1,440✔
695
            // a sortition happenend, and we won
696
            info!("Won sortition; begin tenure.";
1,232✔
697
                    "winning_sortition" => %sn.consensus_hash);
698
            return MinerDirective::BeginTenure {
1,232✔
699
                parent_tenure_start: committed_index_hash,
1,232✔
700
                burnchain_tip: sn.clone(),
1,232✔
701
                election_block: sn,
1,232✔
702
                late: false,
1,232✔
703
            };
1,232✔
704
        }
208✔
705

706
        // a sortition happened, but we didn't win. Check if we won the ongoing tenure.
707
        debug!(
208✔
708
            "Relayer: did not win sortition {}, so stopping tenure",
709
            &sn.sortition
×
710
        );
711

712
        let won_ongoing_tenure_sortition =
208✔
713
            canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh);
208✔
714
        if won_ongoing_tenure_sortition {
208✔
715
            // we won the current ongoing tenure, but not the most recent sortition. Should we attempt to extend immediately or wait for the incoming miner?
716
            if let Ok(has_higher) = Self::has_higher_sortition_commits_to_stacks_tip_tenure(
80✔
717
                &self.sortdb,
80✔
718
                &mut self.chainstate,
80✔
719
                &sn,
80✔
720
                &canonical_stacks_snapshot,
80✔
721
            ) {
80✔
722
                if has_higher {
80✔
723
                    debug!("Relayer: Did not win current sortition but won the prior valid sortition. Will attempt to extend tenure after allowing the new miner some time to come online.";
69✔
724
                            "tenure_extend_wait_timeout_ms" => self.config.miner.tenure_extend_wait_timeout.as_millis(),
×
725
                    );
726
                    self.tenure_extend_time = Some(TenureExtendTime::unresponsive_winner(
69✔
727
                        self.config.miner.tenure_extend_wait_timeout,
69✔
728
                    ));
69✔
729
                } else {
730
                    info!("Relayer: no valid sortition since our last winning sortition. Will extend tenure.");
11✔
731
                    self.tenure_extend_time = Some(TenureExtendTime::immediate(
11✔
732
                        TenureExtendReason::BadSortitionWinner,
11✔
733
                    ));
11✔
734
                }
735
            }
×
736
        }
128✔
737
        MinerDirective::StopTenure
208✔
738
    }
1,444✔
739

740
    /// Choose a miner directive for a sortition with no winner.
741
    ///
742
    /// The decision process is a little tricky, because the right decision depends on:
743
    /// * whether or not we won the sortition that started the ongoing Stacks tenure
744
    /// * whether or not we won the last sortition with a winner
745
    /// * whether or not the last sortition winner has produced a Stacks block
746
    /// * whether or not the ongoing Stacks tenure is at or descended from the last-winning
747
    /// sortition
748
    ///
749
    /// Find out who won the last sortition with a winner.  If it was us, and if we haven't yet
750
    /// submitted a `BlockFound` tenure-change for it (which can happen if this given sortition is
751
    /// from a flash block), then start mining immediately with a "late" `BlockFound` tenure, _and_
752
    /// prepare to start mining right afterwards with an `Extended` tenure-change so as to represent
753
    /// the given sortition `sn`'s burn view in the Stacks chain.
754
    ///
755
    /// Otherwise, if did not win the last-winning sortition, then check to see if we're the ongoing
756
    /// Stack's tenure's miner. If so, then we _may_ start mining after a timeout _if_ the winner of
757
    /// the last-good sortition (not us) fails to submit a `BlockFound` tenure-change block.
758
    /// This can happen if `sn` was a flash block, and the remote miner has yet to process it.
759
    ///
760
    /// We won't always be able to mine -- for example, this could be an empty sortition, but the
761
    /// parent block could be an epoch 2 block.  In this case, the right thing to do is to wait for
762
    /// the next block-commit.
763
    fn choose_directive_sortition_without_winner(
141✔
764
        &mut self,
141✔
765
        sn: BlockSnapshot,
141✔
766
        mining_pk: &Hash160,
141✔
767
    ) -> Option<MinerDirective> {
141✔
768
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
141✔
769
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
141✔
770
                .expect("FATAL: failed to query sortition DB for stacks tip");
141✔
771
        let canonical_stacks_snapshot =
141✔
772
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
141✔
773
                .expect("FATAL: failed to query sortiiton DB for epoch")
141✔
774
                .expect("FATAL: no sortition for canonical stacks tip");
141✔
775

776
        // find out what epoch the Stacks tip is in.
777
        // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so
778
        // right now since this sortition has no winner.
779
        let cur_epoch = SortitionDB::get_stacks_epoch(
141✔
780
            self.sortdb.conn(),
141✔
781
            canonical_stacks_snapshot.block_height,
141✔
782
        )
783
        .expect("FATAL: failed to query sortition DB for epoch")
141✔
784
        .expect("FATAL: no epoch defined for existing sortition");
141✔
785

786
        if cur_epoch.epoch_id < StacksEpochId::Epoch30 {
141✔
787
            debug!(
×
788
                "As of sortition {}, there has not yet been a Nakamoto tip. Cannot mine.",
789
                &canonical_stacks_snapshot.consensus_hash
×
790
            );
791
            return None;
×
792
        }
141✔
793

794
        // find out who won the last non-empty sortition. It may have been us.
795
        let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &sn)
141✔
796
            .inspect_err(|e| {
141✔
797
                warn!("Relayer: Failed to load last winning snapshot: {e:?}");
×
798
            })
×
799
        else {
800
            // this should be unreachable, but don't tempt fate.
801
            info!("Relayer: No prior snapshots have a winning sortition. Will not try to mine.");
×
802
            return None;
×
803
        };
804

805
        // Check if we won the last winning snapshot AND it commits to the ongoing tenure.
806
        let won_last_winning_snapshot =
141✔
807
            last_winning_snapshot.miner_pk_hash.as_ref() == Some(mining_pk);
141✔
808
        let canonical_stacks_tip =
141✔
809
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
141✔
810
        let commits_to_tip_tenure = Self::sortition_commits_to_stacks_tip_tenure(
141✔
811
            &mut self.chainstate,
141✔
812
            &canonical_stacks_tip,
141✔
813
            &canonical_stacks_snapshot,
141✔
814
            &last_winning_snapshot,
141✔
815
        ).unwrap_or_else(|e| {
141✔
816
            warn!(
×
817
                "Relayer: Failed to determine if last winning sortition commits to current tenure: {e:?}";
818
                "sortition_ch" => %sn.consensus_hash,
819
                "stacks_tip_ch" => %canonical_stacks_tip_ch
820
            );
821
            false
×
822
        });
×
823

824
        if (won_last_winning_snapshot && commits_to_tip_tenure)
141✔
825
            || self.config.get_node_config(false).mock_mining
97✔
826
        {
827
            debug!(
44✔
828
                "Relayer: we won the last winning sortition {}",
829
                &last_winning_snapshot.consensus_hash
×
830
            );
831

832
            if Self::need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) {
44✔
833
                info!(
44✔
834
                    "Relayer: will submit late BlockFound for {}",
835
                    &last_winning_snapshot.consensus_hash
44✔
836
                );
837
                // prepare to immediately extend after our BlockFound gets mined.
838
                self.tenure_extend_time = Some(TenureExtendTime::immediate(
44✔
839
                    TenureExtendReason::EmptySortition,
44✔
840
                ));
44✔
841
                return Some(MinerDirective::BeginTenure {
44✔
842
                    parent_tenure_start: StacksBlockId(
44✔
843
                        last_winning_snapshot.winning_stacks_block_hash.clone().0,
44✔
844
                    ),
44✔
845
                    burnchain_tip: sn,
44✔
846
                    election_block: last_winning_snapshot,
44✔
847
                    late: true,
44✔
848
                });
44✔
849
            }
×
850
            let tip_is_last_winning_snapshot = canonical_stacks_snapshot.block_height
×
851
                == last_winning_snapshot.block_height
×
852
                && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash;
×
853

854
            if tip_is_last_winning_snapshot {
×
855
                // this is the ongoing tenure snapshot. A BlockFound has already been issued. We
856
                // can instead opt to Extend immediately
857
                info!("Relayer: BlockFound already issued for the last winning sortition. Will extend tenure.");
×
858
                return Some(MinerDirective::ContinueTenure {
×
859
                    new_burn_view: sn.consensus_hash,
×
860
                });
×
861
            }
×
862
        }
97✔
863

864
        let won_ongoing_tenure_sortition =
97✔
865
            canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pk);
97✔
866
        if won_ongoing_tenure_sortition {
97✔
867
            info!("Relayer: No sortition, but we produced the canonical Stacks tip. Will extend tenure.");
93✔
868
            if !won_last_winning_snapshot {
93✔
869
                // delay trying to continue since the last snasphot with a sortition was won
870
                // by someone else -- there's a chance that this other miner will produce a
871
                // BlockFound in the interim.
872
                debug!("Relayer: Did not win last winning snapshot despite mining the ongoing tenure. Will attempt to extend tenure after allowing the new miner some time to produce a block.");
3✔
873
                self.tenure_extend_time = Some(TenureExtendTime::unresponsive_winner(
3✔
874
                    self.config.miner.tenure_extend_wait_timeout,
3✔
875
                ));
3✔
876
                return None;
3✔
877
            }
90✔
878
            return Some(MinerDirective::ContinueTenure {
90✔
879
                new_burn_view: sn.consensus_hash,
90✔
880
            });
90✔
881
        }
4✔
882

883
        info!("Relayer: No sortition, and we did not produce the last Stacks tip. Will not mine.");
4✔
884
        return None;
4✔
885
    }
141✔
886

887
    /// Determine if we the current tenure winner needs to issue a BlockFound.
888
    /// Assumes the caller has already checked that the last-winning snapshot was won by us.
889
    ///
890
    /// Returns true if the stacks tip's snapshot is an ancestor of the last-won sortition
891
    /// Returns false otherwise.
892
    fn need_block_found(
315✔
893
        canonical_stacks_snapshot: &BlockSnapshot,
315✔
894
        last_winning_snapshot: &BlockSnapshot,
315✔
895
    ) -> bool {
315✔
896
        // we won the last non-empty sortition. Has there been a BlockFound issued for it?
897
        // This would be true if the stacks tip's tenure is at or descends from this snapshot.
898
        // If there has _not_ been a BlockFound, then we should issue one.
899
        if canonical_stacks_snapshot.block_height > last_winning_snapshot.block_height {
315✔
900
            // stacks tip is ahead of this snapshot, so no BlockFound can be issued.
901
            test_debug!(
1✔
902
                "Stacks_tip_sn.block_height ({}) > last_winning_snapshot.block_height ({})",
903
                canonical_stacks_snapshot.block_height,
904
                last_winning_snapshot.block_height
905
            );
906
            false
1✔
907
        } else if canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height
314✔
908
            && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash
18✔
909
        {
910
            // this is the ongoing tenure snapshot. A BlockFound has already been issued.
911
            test_debug!(
18✔
912
                "Ongoing tenure {} already represents last-winning snapshot",
913
                &canonical_stacks_snapshot.consensus_hash
×
914
            );
915
            false
18✔
916
        } else {
917
            // The stacks tip is behind the last-won sortition, so a BlockFound is still needed.
918
            true
296✔
919
        }
920
    }
315✔
921

922
    /// Given the pointer to a recently processed sortition, see if we won the sortition, and
    /// determine what miner action (if any) to take.
    ///
    /// Returns a directive to the relayer thread to either start, stop, or continue a tenure, if
    /// this sortition matches the sortition tip and we have a parent to build atop.
    ///
    /// Otherwise, returns None, meaning no action will be taken.
    // This method is covered by the e2e bitcoind tests, which do not show up
    //  in mutant coverage.
    #[cfg_attr(test, mutants::skip)]
    fn process_sortition(
        &mut self,
        consensus_hash: ConsensusHash,
        burn_hash: BurnchainHeaderHash,
        committed_index_hash: StacksBlockId,
    ) -> Option<MinerDirective> {
        let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash)
            .expect("FATAL: failed to query sortition DB")
            .expect("FATAL: unknown consensus hash");

        // We "won" only if both a winner pkh and our own mining pkh are known and equal;
        // a node with no mining key configured never considers itself the winner.
        let was_winning_pkh = if let (Some(winning_pkh), Some(my_pkh)) = (
            sn.miner_pk_hash.as_ref(),
            self.get_mining_key_pkh().as_ref(),
        ) {
            winning_pkh == my_pkh
        } else {
            false
        };

        let won_sortition = sn.sortition && was_winning_pkh;
        if won_sortition {
            increment_stx_blocks_mined_counter();
        }
        // Record and advertise this sortition regardless of the outcome.
        self.globals.set_last_sortition(sn.clone());
        self.globals.counters.bump_blocks_processed();
        self.globals.counters.bump_sortitions_processed();

        // there may be a buffered stacks block to process, so wake up the coordinator to check
        self.globals.coord_comms.announce_new_stacks_block();

        info!(
            "Relayer: Process sortition";
            "sortition_ch" => %consensus_hash,
            "burn_hash" => %burn_hash,
            "burn_height" => sn.block_height,
            "winning_txid" => %sn.winning_block_txid,
            "committed_parent" => %committed_index_hash,
            "won_sortition?" => won_sortition,
        );

        let cur_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
            .expect("FATAL: failed to query sortition DB");

        // Only act on the sortition if it is still the canonical burnchain tip;
        // otherwise defer to the relayer's initiative loop to catch up.
        if cur_sn.consensus_hash != consensus_hash {
            info!("Relayer: Current sortition {} is ahead of processed sortition {consensus_hash}; taking no action", &cur_sn.consensus_hash);
            self.globals
                .raise_initiative("process_sortition".to_string());
            return None;
        }

        // Reset the tenure extend time
        self.tenure_extend_time = None;
        let Some(mining_pk) = self.get_mining_key_pkh() else {
            debug!("No mining key, will not mine");
            return None;
        };

        let epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), sn.block_height)
            .expect("FATAL: epoch not found for current snapshot")
            .expect("FATAL: epoch not found for current snapshot");
        // Nakamoto miner directives only apply from epoch 3.0 onwards.
        if !epoch.epoch_id.uses_nakamoto_blocks() {
            return None;
        }

        // Dispatch on whether this sortition elected a winner.
        let directive_opt = if sn.sortition {
            Some(self.choose_directive_sortition_with_winner(sn, &mining_pk, committed_index_hash))
        } else {
            self.choose_directive_sortition_without_winner(sn, &mining_pk)
        };
        debug!(
            "Relayer: Processed sortition {consensus_hash}: Miner directive is {directive_opt:?}"
        );
        directive_opt
    }
1006

1007
    /// Constructs and returns a LeaderKeyRegisterOp out of the provided params
1008
    fn make_key_register_op(
37✔
1009
        vrf_public_key: VRFPublicKey,
37✔
1010
        consensus_hash: &ConsensusHash,
37✔
1011
        miner_pkh: &Hash160,
37✔
1012
    ) -> BlockstackOperationType {
37✔
1013
        BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp {
37✔
1014
            public_key: vrf_public_key,
37✔
1015
            memo: miner_pkh.as_bytes().to_vec(),
37✔
1016
            consensus_hash: consensus_hash.clone(),
37✔
1017
            vtxindex: 0,
37✔
1018
            txid: Txid([0u8; 32]),
37✔
1019
            block_height: 0,
37✔
1020
            burn_header_hash: BurnchainHeaderHash::zero(),
37✔
1021
        })
37✔
1022
    }
37✔
1023

1024
    /// Create and broadcast a VRF public key registration transaction.
    ///
    /// Does nothing if a key registration is already in-flight
    /// (`self.last_vrf_key_burn_height` is set).  On successful submission, records the
    /// pending registration in the relayer's globals and bumps the submitted-VRF counter.
    pub fn rotate_vrf_and_register(&mut self, burn_block: &BlockSnapshot) {
        if self.last_vrf_key_burn_height.is_some() {
            // already in-flight
            return;
        }
        let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), burn_block.block_height)
            .expect("FATAL: failed to query sortition DB")
            .expect("FATAL: no epoch defined")
            .epoch_id;
        // derive the VRF keypair deterministically from the burn block height
        let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height);
        let burnchain_tip_consensus_hash = &burn_block.consensus_hash;
        let miner_pkh = self.keychain.get_nakamoto_pkh();

        debug!(
            "Submitting LeaderKeyRegister";
            "vrf_pk" => vrf_pk.to_hex(),
            "burn_block_height" => burn_block.block_height,
            "miner_pkh" => miner_pkh.to_hex(),
        );

        let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh);

        let mut op_signer = self.keychain.generate_op_signer();
        if let Ok(txid) = self
            .bitcoin_controller
            .submit_operation(cur_epoch, op, &mut op_signer)
        {
            // advance key registration state
            self.last_vrf_key_burn_height = Some(burn_block.block_height);
            self.globals
                .set_pending_leader_key_registration(burn_block.block_height, txid);
            self.globals.counters.bump_naka_submitted_vrfs();
        }
    }
1060

1061
    /// Produce the block-commit for this upcoming tenure, if we can.
    ///
    /// Takes the Nakamoto chain tip (consensus hash, block header hash).
    ///
    /// Returns the (the most recent burn snapshot, the most recent stacks tip, the commit-op) on success
    /// Returns Err(..) if we fail somehow.
    ///
    /// TODO: unit test
    pub(crate) fn make_block_commit(
        &mut self,
        tip_block_ch: &ConsensusHash,
        tip_block_bh: &BlockHeaderHash,
    ) -> Result<LastCommit, NakamotoNodeError> {
        let tip_block_id = StacksBlockId::new(tip_block_ch, tip_block_bh);
        let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
            .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?;

        let stacks_tip = StacksBlockId::new(tip_block_ch, tip_block_bh);

        // sanity check -- this block must exist and have been processed locally
        let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header(
            &mut self.chainstate.index_conn(),
            &stacks_tip,
            tip_block_ch,
        )
        .map_err(|e| {
            error!(
                "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}"
            );
            NakamotoNodeError::ParentNotFound
        })?
        .ok_or_else(|| {
            error!(
                "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}"
            );
            NakamotoNodeError::ParentNotFound
        })?;

        // load the VRF proof generated in this tenure, so we can use it to seed the VRF in the
        // upcoming tenure.  This may be an epoch2x VRF proof.
        let tip_vrf_proof = NakamotoChainState::get_block_vrf_proof(
            &mut self.chainstate.index_conn(),
            &stacks_tip,
            tip_block_ch,
        )
        .map_err(|e| {
            error!("Failed to load VRF proof for {tip_block_ch} off of {stacks_tip}: {e:?}");
            NakamotoNodeError::ParentNotFound
        })?
        .ok_or_else(|| {
            error!("No block VRF proof for {tip_block_ch} off of {stacks_tip}");
            NakamotoNodeError::ParentNotFound
        })?;

        // let's figure out the recipient set!
        let recipients = get_nakamoto_next_recipients(
            &sort_tip,
            &mut self.sortdb,
            &mut self.chainstate,
            &stacks_tip,
            &self.burnchain,
        )
        .map_err(|e| {
            error!("Relayer: Failure fetching recipient set: {e:?}");
            NakamotoNodeError::SnapshotNotFoundForChainTip
        })?;

        let commit_outs = match recipients.as_ref() {
            // Under waterfall PoX, every block in the cycle (including
            // prepare-phase blocks) commits to the cycle's sBTC address.
            // Don't let the classic prepare-phase burn-output override clobber it.
            Some(RewardSetInfo::Waterfall(_)) => {
                RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet())
            }
            // classic PoX: prepare-phase commits pay to the burn address
            _ if self
                .burnchain
                .is_in_prepare_phase(sort_tip.block_height + 1) =>
            {
                vec![PoxAddress::standard_burn_address(self.config.is_mainnet())]
            }
            _ => RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()),
        };

        // find the sortition that kicked off this tenure (it may be different from the sortition
        // tip, such as when there is no sortition or when the miner of the current sortition never
        // produces a block).  This is used to find the parent block-commit of the block-commit
        // we'll submit.
        let Ok(Some(tip_tenure_sortition)) =
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), tip_block_ch)
        else {
            error!("Relayer: Failed to lookup the block snapshot of highest tenure ID"; "tenure_consensus_hash" => %tip_block_ch);
            return Err(NakamotoNodeError::ParentNotFound);
        };

        // find the parent block-commit of this commit, so we can find the parent vtxindex
        // if the parent is a shadow block, then the vtxindex would be 0.
        let commit_parent_block_burn_height = tip_tenure_sortition.block_height;
        let commit_parent_winning_vtxindex = if let Ok(Some(parent_winning_tx)) =
            SortitionDB::get_block_commit(
                self.sortdb.conn(),
                &tip_tenure_sortition.winning_block_txid,
                &tip_tenure_sortition.sortition_id,
            ) {
            parent_winning_tx.vtxindex
        } else {
            debug!(
                "{}/{} ({}) must be a shadow block, since it has no block-commit",
                &tip_block_bh, &tip_block_ch, &tip_block_id
            );
            // verify the shadow-block assumption via the block's version byte
            let Ok(Some(parent_version)) =
                NakamotoChainState::get_nakamoto_block_version(self.chainstate.db(), &tip_block_id)
            else {
                error!(
                    "Relayer: Failed to lookup block version of {}",
                    &tip_block_id
                );
                return Err(NakamotoNodeError::ParentNotFound);
            };

            if !NakamotoBlockHeader::is_shadow_block_version(parent_version) {
                error!(
                    "Relayer: parent block-commit of {} not found, and it is not a shadow block",
                    &tip_block_id
                );
                return Err(NakamotoNodeError::ParentNotFound);
            }

            0
        };

        // epoch in which this commit will be sent (affects how the burnchain client processes it)
        let Ok(Some(target_epoch)) =
            SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1)
        else {
            error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1);
            return Err(NakamotoNodeError::SnapshotNotFoundForChainTip);
        };

        let (_, burnchain_config) = self.check_burnchain_config_changed();

        // let's commit, but target the current burnchain tip with our modulus so the commit is
        // only valid if it lands in the targeted burnchain block height
        let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS)
            .map_err(|_| {
                error!("Relayer: Block mining modulus is not u8");
                NakamotoNodeError::UnexpectedChainState
            })?;

        // burnchain signer for this commit
        let sender = self.keychain.get_burnchain_signer();

        // VRF key this commit uses (i.e. the one we registered)
        let key = self
            .globals
            .get_leader_key_registration_state()
            .get_active()
            .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?;

        let mut commit = LeaderBlockCommitOp {
            // NOTE: to be filled in
            treatment: vec![],
            // NOTE: PoX sunset has been disabled prior to taking effect
            sunset_burn: 0,
            // block-commits in Nakamoto commit to the ongoing tenure's tenure-start block (which,
            // when processed, become the start-block of the tenure atop which this miner will
            // produce blocks)
            block_header_hash: BlockHeaderHash(
                highest_tenure_start_block_header.index_block_hash().0,
            ),
            // the rest of this is the same as epoch2x commits, modulo the new epoch marker
            burn_fee: burnchain_config.burn_fee_cap,
            apparent_sender: sender,
            key_block_ptr: u32::try_from(key.block_height)
                .expect("FATAL: burn block height exceeded u32"),
            key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"),
            memo: vec![STACKS_EPOCH_LATEST_MARKER],
            new_seed: VRFSeed::from_proof(&tip_vrf_proof),
            parent_block_ptr: u32::try_from(commit_parent_block_burn_height)
                .expect("FATAL: burn block height exceeded u32"),
            parent_vtxindex: u16::try_from(commit_parent_winning_vtxindex)
                .expect("FATAL: vtxindex exceeded u16"),
            burn_parent_modulus,
            commit_outs,

            // NOTE: to be filled in
            input: (Txid([0; 32]), 0),
            vtxindex: 0,
            txid: Txid([0u8; 32]),
            block_height: 0,
            burn_header_hash: BurnchainHeaderHash::zero(),
        };

        // test-only fault injection: corrupt the parent vtxindex
        if std::env::var("FAULT_INJECTION_BLOCK_COMMIT_VTXINDEX_SENTINEL") == Ok("1".to_string()) {
            info!("Zeroing parent_vtxindex");
            commit.parent_vtxindex = 0;
        }

        // test-only fault injection: corrupt the parent block pointer and VRF seed
        if std::env::var("FAULT_INJECTION_BLOCK_COMMIT_PARENT_SENTINEL") == Ok("1".to_string()) {
            info!("Altering parent_block_ptr");
            commit.parent_block_ptr = commit.parent_block_ptr.saturating_sub(1);

            let parent_tenure_tip_id = highest_tenure_start_block_header
                .anchored_header
                .as_stacks_nakamoto()
                .unwrap()
                .parent_block_id
                .clone();

            let parent_tenure_tip =
                NakamotoChainState::get_block_header(&self.chainstate.db(), &parent_tenure_tip_id)
                    .unwrap()
                    .unwrap();

            let parent_tip_vrf_proof = NakamotoChainState::get_block_vrf_proof(
                &mut self.chainstate.index_conn(),
                &stacks_tip,
                &parent_tenure_tip.consensus_hash,
            )
            .unwrap()
            .unwrap();

            info!(
                "Altering new_seed from {} to {}",
                &commit.new_seed,
                &VRFSeed::from_proof(&parent_tip_vrf_proof)
            );
            commit.new_seed = VRFSeed::from_proof(&parent_tip_vrf_proof);
        }

        Ok(LastCommit::new(
            commit,
            sort_tip,
            stacks_tip,
            highest_tenure_start_block_header.consensus_hash,
            highest_tenure_start_block_header
                .anchored_header
                .block_hash(),
            target_epoch.epoch_id,
        ))
    }
1301

1302
    /// Test-only hook: spins while the `TEST_MINER_THREAD_STALL` flag is set so
    /// tests can hold the relayer at the miner-startup boundary.
    #[cfg(test)]
    fn fault_injection_stall_miner_startup() {
        // Bail out immediately when no stall is requested, so we only log when
        // a test has actually set the directive.
        if !TEST_MINER_THREAD_STALL.get() {
            return;
        }
        warn!("Relayer miner thread startup is stalled due to testing directive to stall the miner");
        while TEST_MINER_THREAD_STALL.get() {
            std::thread::sleep(std::time::Duration::from_millis(10));
        }
        warn!(
            "Relayer miner thread startup is no longer stalled due to testing directive. Continuing..."
        );
    }
1315

1316
    /// Production build: miner-startup stalling is a test-only fault injection, so this is a no-op.
    #[cfg(not(test))]
    fn fault_injection_stall_miner_startup() {}
1318

1319
    /// Test-only hook: spins while the `TEST_MINER_THREAD_START_STALL` flag is set,
    /// holding the newly-spawned miner thread before it begins mining.
    #[cfg(test)]
    fn fault_injection_stall_miner_thread_startup() {
        // Early return when no stall directive is active, so we don't log every call.
        if !TEST_MINER_THREAD_START_STALL.get() {
            return;
        }
        warn!("Miner thread startup is stalled due to testing directive");
        while TEST_MINER_THREAD_START_STALL.get() {
            std::thread::sleep(std::time::Duration::from_millis(10));
        }
        warn!(
            "Miner thread startup is no longer stalled due to testing directive. Continuing..."
        );
    }
1332

1333
    /// Production build: miner-thread-startup stalling is a test-only fault injection, so this is a no-op.
    #[cfg(not(test))]
    fn fault_injection_stall_miner_thread_startup() {}
1335

1336
    /// Create the block miner thread state.
    /// Only proceeds if all of the following are true:
    /// * the miner is not blocked
    /// * last_burn_block corresponds to the canonical sortition DB's chain tip
    /// * the time of issuance is sufficiently recent
    /// * there are no unprocessed stacks blocks in the staging DB
    /// * the relayer has already tried a download scan that included this sortition (which, if a block was found, would have placed it into the staging DB and marked it as unprocessed)
    /// * a miner thread is not running already
    ///
    /// Returns the constructed `BlockMinerThread` state on success.
    /// Returns `Err(NakamotoNodeError::FaultInjection)` if mining is skipped by fault injection,
    /// or `Err(NakamotoNodeError::MissedMiningOpportunity)` if the canonical sortition tip moved
    /// past `burn_tip_at_start` while this directive was in flight.
    fn create_block_miner(
        &mut self,
        registered_key: RegisteredKey,
        burn_election_block: BlockSnapshot,
        burn_tip: BlockSnapshot,
        parent_tenure_id: StacksBlockId,
        reason: MinerReason,
        burn_tip_at_start: &ConsensusHash,
    ) -> Result<BlockMinerThread, NakamotoNodeError> {
        // Test/fault-injection gate: optionally refuse to mine at this burn height.
        if fault_injection_skip_mining(&self.config.node.rpc_bind, burn_tip.block_height) {
            debug!(
                "Relayer: fault injection skip mining at block height {}",
                burn_tip.block_height
            );
            return Err(NakamotoNodeError::FaultInjection);
        }
        // Test-only stall point (no-op in production builds).
        Self::fault_injection_stall_miner_startup();

        let burn_header_hash = burn_tip.burn_header_hash.clone();
        let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
            .expect("FATAL: failed to query sortition DB for canonical burn chain tip");

        let burn_chain_tip = burn_chain_sn.burn_header_hash.clone();

        // If the canonical sortition advanced past the view this RunTenure was issued
        // against, this mining opportunity is stale -- drop it and count the miss.
        if &burn_chain_sn.consensus_hash != burn_tip_at_start {
            info!(
                "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}"
            );
            self.globals.counters.bump_missed_tenures();
            return Err(NakamotoNodeError::MissedMiningOpportunity);
        }

        debug!(
            "Relayer: Spawn tenure thread";
            "height" => burn_tip.block_height,
            "burn_header_hash" => %burn_header_hash,
            "parent_tenure_id" => %parent_tenure_id,
            "reason" => %reason,
            "burn_election_block.consensus_hash" => %burn_election_block.consensus_hash,
            "burn_tip.consensus_hash" => %burn_tip.consensus_hash,
        );

        // Construct (but do not start) the miner thread state; the caller spawns it.
        let miner_thread_state = BlockMinerThread::new(
            self,
            registered_key,
            burn_election_block,
            burn_tip.clone(),
            parent_tenure_id,
            burn_tip_at_start,
            reason,
        )?;
        Ok(miner_thread_state)
    }
1397

1398
    /// Stop the currently-running miner thread (if any) and spawn a new miner thread for the
    /// tenure whose parent tenure starts at `parent_tenure_start`, using the sortition
    /// `block_election_snapshot` and burnchain view `burn_tip`.
    ///
    /// The prior miner thread handle is moved into the new thread, which joins it itself
    /// (via `run_miner`) so that the relayer never blocks waiting on the old miner.
    ///
    /// Returns `Err(NakamotoNodeError::NoVRFKeyActive)` if no VRF key is registered, or a
    /// `SpawnError` if the OS thread could not be started; errors from `create_block_miner`
    /// are propagated.
    fn start_new_tenure(
        &mut self,
        parent_tenure_start: StacksBlockId,
        block_election_snapshot: BlockSnapshot,
        burn_tip: BlockSnapshot,
        reason: MinerReason,
        burn_tip_at_start: &ConsensusHash,
    ) -> Result<(), NakamotoNodeError> {
        // when starting a new tenure, block the mining thread if its currently running.
        // the new mining thread will join it (so that the new mining thread stalls, not the relayer)
        let prior_tenure_thread = self.miner_thread.take();
        // The recorded burn view belongs to the thread being replaced; clear it until
        // the new thread is successfully spawned.
        self.miner_thread_burn_view = None;

        let vrf_key = self
            .globals
            .get_leader_key_registration_state()
            .get_active()
            .ok_or_else(|| {
                warn!("Trying to start new tenure, but no VRF key active");
                NakamotoNodeError::NoVRFKeyActive
            })?;
        let new_miner_state = self.create_block_miner(
            vrf_key,
            block_election_snapshot,
            burn_tip.clone(),
            parent_tenure_start.clone(),
            reason,
            burn_tip_at_start,
        )?;
        // Keep the abort flag so a later stop_tenure() can signal this thread.
        let miner_abort_flag = new_miner_state.get_abort_flag();

        debug!("Relayer: starting new tenure thread");

        // Random suffix keeps thread names unique across restarts of the same tenure.
        let rand_id = thread_rng().gen::<u32>();
        let is_mock = if self.config.node.mock_mining {
            "mock-"
        } else {
            ""
        };

        let new_miner_handle = std::thread::Builder::new()
            .name(format!("{is_mock}miner.{parent_tenure_start}.{rand_id}",))
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
            .spawn(move || {
                debug!(
                    "New block miner thread ID is {:?}",
                    std::thread::current().id()
                );
                // Test-only stall point (no-op in production builds).
                Self::fault_injection_stall_miner_thread_startup();
                // run_miner joins the prior tenure thread before mining.
                if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) {
                    info!("Miner thread failed: {e:?}");
                    Err(e)
                } else {
                    Ok(())
                }
            })
            .map_err(|e| {
                error!("Relayer: Failed to start tenure thread: {e:?}");
                NakamotoNodeError::SpawnError(e)
            })?;
        debug!(
            "Relayer: started tenure thread ID {:?}",
            new_miner_handle.thread().id()
        );
        // Record the new thread (with its abort flag) and the burn view it was started for.
        self.miner_thread
            .replace(MinerStopHandle::new(new_miner_handle, miner_abort_flag));
        self.miner_thread_burn_view.replace(burn_tip);
        Ok(())
    }
1467

1468
    /// Stop the current tenure thread, if one is running, by spawning a dedicated
    /// "tenure-stop" thread that performs the (potentially blocking) stop/join, so the
    /// relayer itself never stalls.  The stop-thread's handle replaces `self.miner_thread`
    /// so that a subsequent tenure start can join on it in turn.
    fn stop_tenure(&mut self) -> Result<(), NakamotoNodeError> {
        // when stopping a tenure, block the mining thread if it's currently running, then join it.
        // do this in a new thread (so that the new thread stalls, not the relayer)
        let Some(prior_tenure_thread) = self.miner_thread.take() else {
            debug!("Relayer: no tenure thread to stop");
            return Ok(());
        };
        // The recorded burn view belonged to the thread being stopped.
        self.miner_thread_burn_view = None;

        let id = prior_tenure_thread.inner_thread().id();
        let abort_flag = prior_tenure_thread.abort_flag.clone();
        let globals = self.globals.clone();

        let stop_handle = std::thread::Builder::new()
            .name(format!(
                "tenure-stop({:?})-{}",
                id, self.local_peer.data_url
            ))
            // The spawned thread owns the prior handle and performs the stop/join.
            .spawn(move || prior_tenure_thread.stop(&globals))
            .map_err(|e| {
                error!("Relayer: Failed to spawn a stop-tenure thread: {e:?}");
                NakamotoNodeError::SpawnError(e)
            })?;

        // Track the stop-thread as the "current" miner thread so later starts join it.
        self.miner_thread
            .replace(MinerStopHandle::new(stop_handle, abort_flag));
        debug!("Relayer: stopped tenure thread ID {id:?}");
        Ok(())
    }
1497

1498
    /// Get the public key hash for the mining key.
1499
    fn get_mining_key_pkh(&self) -> Option<Hash160> {
6,519✔
1500
        let Some(ref mining_key) = self.config.miner.mining_key else {
6,519✔
1501
            return None;
×
1502
        };
1503
        Some(Hash160::from_node_public_key(
6,519✔
1504
            &StacksPublicKey::from_private(mining_key),
6,519✔
1505
        ))
6,519✔
1506
    }
6,519✔
1507

1508
    /// Helper method to get the last snapshot with a winner
1509
    fn get_last_winning_snapshot(
423✔
1510
        sortdb: &SortitionDB,
423✔
1511
        sort_tip: &BlockSnapshot,
423✔
1512
    ) -> Result<BlockSnapshot, NakamotoNodeError> {
423✔
1513
        let ih = sortdb.index_handle(&sort_tip.sortition_id);
423✔
1514
        Ok(ih.get_last_snapshot_with_sortition(sort_tip.block_height)?)
423✔
1515
    }
423✔
1516

1517
    /// Returns true if the sortition `sn` commits to the tenure start block of the ongoing Stacks tenure `stacks_tip_sn`.
    /// Returns false otherwise.
    ///
    /// `stacks_tip_id` is the canonical Stacks tip block ID; `stacks_tip_sn` is the snapshot of
    /// the sortition that elected the ongoing tenure.
    /// Returns `Err(NakamotoNodeError::ParentNotFound)` if the tenure-start header for the
    /// Stacks tip cannot be found; DB errors are propagated.
    fn sortition_commits_to_stacks_tip_tenure(
        chain_state: &mut StacksChainState,
        stacks_tip_id: &StacksBlockId,
        stacks_tip_sn: &BlockSnapshot,
        sn: &BlockSnapshot,
    ) -> Result<bool, NakamotoNodeError> {
        if !sn.sortition {
            // definitely not a valid sortition
            debug!("Relayer: Sortition {} is empty", &sn.consensus_hash);
            return Ok(false);
        }
        // The sortition must commit to the tenure start block of the ongoing Stacks tenure.
        let mut ic = chain_state.index_conn();
        // The winning commit's "winning_stacks_block_hash" bytes name the parent tenure ID.
        let parent_tenure_id = StacksBlockId(sn.winning_stacks_block_hash.clone().0);
        let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header(
            &mut ic,
            stacks_tip_id,
            &stacks_tip_sn.consensus_hash,
        )?
        .ok_or_else(|| {
            error!(
                "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip_id}"
            );
            NakamotoNodeError::ParentNotFound
        })?;

        // Valid only if the commit's parent tenure ID equals the ongoing tenure's start block.
        let highest_tenure_start_block_id = highest_tenure_start_block_header.index_block_hash();
        if highest_tenure_start_block_id != parent_tenure_id {
            debug!("Relayer: Sortition {} is at the tip, but does not commit to {parent_tenure_id} so cannot be valid", &sn.consensus_hash;
                "highest_tenure_start_block_header_block_id" => %highest_tenure_start_block_id);
            return Ok(false);
        }

        Ok(true)
    }
1554

1555
    /// Determine the highest sortition higher than `elected_tenure_id`, but no higher than
1556
    /// `sort_tip` whose winning commit's parent tenure ID matches the `stacks_tip`,
1557
    /// and whose consensus hash matches the `stacks_tip`'s tenure ID.
1558
    ///
1559
    /// Returns Ok(true) if such a sortition is found, and is higher than that of
1560
    /// `elected_tenure_id`.
1561
    /// Returns Ok(false) if no such sortition is found.
1562
    /// Returns Err(..) on DB errors.
1563
    fn has_higher_sortition_commits_to_stacks_tip_tenure(
80✔
1564
        sortdb: &SortitionDB,
80✔
1565
        chain_state: &mut StacksChainState,
80✔
1566
        sortition_tip: &BlockSnapshot,
80✔
1567
        elected_tenure: &BlockSnapshot,
80✔
1568
    ) -> Result<bool, NakamotoNodeError> {
80✔
1569
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
80✔
1570
            SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap();
80✔
1571
        let canonical_stacks_tip =
80✔
1572
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
80✔
1573

1574
        let Ok(Some(canonical_stacks_tip_sn)) =
80✔
1575
            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &canonical_stacks_tip_ch)
80✔
1576
        else {
1577
            return Err(NakamotoNodeError::ParentNotFound);
×
1578
        };
1579

1580
        sortdb
80✔
1581
            .find_from(sortition_tip.clone(), |cursor| {
91✔
1582
                debug!(
91✔
1583
                    "Relayer: check sortition {} to see if it is valid",
1584
                    &cursor.consensus_hash
×
1585
                );
1586
                // have we reached the last tenure we're looking at?
1587
                if cursor.block_height <= elected_tenure.block_height {
91✔
1588
                    return Ok(FindIter::Halt);
11✔
1589
                }
80✔
1590

1591
                if Self::sortition_commits_to_stacks_tip_tenure(
80✔
1592
                    chain_state,
80✔
1593
                    &canonical_stacks_tip,
80✔
1594
                    &canonical_stacks_tip_sn,
80✔
1595
                    &cursor,
80✔
1596
                )? {
×
1597
                    return Ok(FindIter::Found(()));
69✔
1598
                }
11✔
1599

1600
                // nope. continue the search
1601
                return Ok(FindIter::Continue);
11✔
1602
            })
91✔
1603
            .map(|found| found.is_some())
80✔
1604
    }
80✔
1605

1606
    /// Attempt to continue a miner's tenure into the next burn block.
1607
    /// This is allowed if the miner won the last good sortition -- that is, the sortition which
1608
    /// elected the local view of the canonical Stacks fork's ongoing tenure.
1609
    /// Or if the miner won the last valid sortition prior to the current and the current miner
1610
    /// has failed to produce a block before the required timeout.
1611
    ///
1612
    /// This function assumes that the caller has checked that the sortition referred to by
1613
    /// `new_burn_view` does not have a sortition winner or that the winner has not produced a
1614
    /// valid block yet.
1615
    fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> {
94✔
1616
        if let Err(e) = self.stop_tenure() {
94✔
1617
            error!("Relayer: Failed to stop tenure: {e:?}");
×
1618
            return Ok(());
×
1619
        }
94✔
1620
        debug!("Relayer: successfully stopped tenure; will try to continue.");
94✔
1621

1622
        // try to extend, but only if we aren't already running a thread for the current or newer
1623
        // burnchain view
1624
        let Ok(sn) =
94✔
1625
            SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).inspect_err(|e| {
94✔
1626
                error!("Relayer: failed to read canonical burnchain sortition: {e:?}");
×
1627
            })
×
1628
        else {
1629
            return Ok(());
×
1630
        };
1631

1632
        if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() {
94✔
1633
            // a miner thread is already running.  If its burn view is the same as the canonical
1634
            // tip, then do nothing
1635
            if sn.consensus_hash == miner_thread_burn_view.consensus_hash {
×
1636
                info!("Relayer: will not tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %sn.consensus_hash);
×
1637
                return Ok(());
×
1638
            }
×
1639
        }
94✔
1640

1641
        // Get the necessary snapshots and state
1642
        let burn_tip =
94✔
1643
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view)?
94✔
1644
                .ok_or_else(|| {
94✔
1645
                    error!("Relayer: failed to get block snapshot for new burn view");
×
1646
                    NakamotoNodeError::SnapshotNotFoundForChainTip
×
1647
                })?;
×
1648
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
94✔
1649
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap();
94✔
1650
        let canonical_stacks_tip =
94✔
1651
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
94✔
1652
        let canonical_stacks_snapshot = SortitionDB::get_block_snapshot_consensus(
94✔
1653
            self.sortdb.conn(),
94✔
1654
            &canonical_stacks_tip_ch,
94✔
1655
        )?
×
1656
        .ok_or_else(|| {
94✔
1657
            error!("Relayer: failed to get block snapshot for canonical tip");
×
1658
            NakamotoNodeError::SnapshotNotFoundForChainTip
×
1659
        })?;
×
1660
        let reason = MinerReason::Extended {
94✔
1661
            burn_view_consensus_hash: new_burn_view.clone(),
94✔
1662
        };
94✔
1663

1664
        if let Err(e) = self.start_new_tenure(
94✔
1665
            canonical_stacks_tip.clone(),
94✔
1666
            canonical_stacks_snapshot.clone(),
94✔
1667
            burn_tip.clone(),
94✔
1668
            reason.clone(),
94✔
1669
            &new_burn_view,
94✔
1670
        ) {
94✔
1671
            error!("Relayer: Failed to start new tenure: {e:?}");
×
1672
        } else {
1673
            debug!("Relayer: successfully started new tenure.";
94✔
1674
                   "parent_tenure_start" => %canonical_stacks_tip,
1675
                   "burn_tip" => %burn_tip.consensus_hash,
1676
                   "burn_view_snapshot" => %burn_tip.consensus_hash,
1677
                   "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash,
1678
                   "reason" => %reason);
1679
        }
1680
        Ok(())
94✔
1681
    }
94✔
1682

1683
    /// Process a newly-arrived sortition and dispatch the resulting miner directive:
    /// begin a new tenure, continue the current one, or stop mining.
    ///
    /// Returns `false` only when continuing a tenure fails (signalling the caller to stop);
    /// returns `true` otherwise, including when there is no directive to act on.
    fn handle_sortition(
        &mut self,
        consensus_hash: ConsensusHash,
        burn_hash: BurnchainHeaderHash,
        committed_index_hash: StacksBlockId,
    ) -> bool {
        // No directive means there is nothing for the miner to do for this sortition.
        let miner_instruction =
            match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) {
                Some(miner_instruction) => miner_instruction,
                None => {
                    return true;
                }
            };

        match miner_instruction {
            MinerDirective::BeginTenure {
                parent_tenure_start,
                burnchain_tip,
                election_block,
                late,
            } => match self.start_new_tenure(
                parent_tenure_start.clone(),
                // the election block serves as both the election snapshot and the burn view
                election_block.clone(),
                election_block.clone(),
                MinerReason::BlockFound { late },
                &burnchain_tip.consensus_hash,
            ) {
                Ok(()) => {
                    debug!("Relayer: successfully started new tenure.";
                           "parent_tenure_start" => %parent_tenure_start,
                           "burn_tip" => %burnchain_tip.consensus_hash,
                           "burn_view_snapshot" => %burnchain_tip.consensus_hash,
                           "block_election_snapshot" => %burnchain_tip.consensus_hash,
                           "reason" => %MinerReason::BlockFound { late });
                }
                Err(e) => {
                    // failure to start is logged but not fatal to the relayer
                    error!("Relayer: Failed to start new tenure: {e:?}");
                }
            },
            MinerDirective::ContinueTenure { new_burn_view } => {
                match self.continue_tenure(new_burn_view) {
                    Ok(()) => {
                        debug!("Relayer: successfully handled continue tenure.");
                    }
                    Err(e) => {
                        error!("Relayer: Failed to continue tenure: {e:?}");
                        // only this failure aborts sortition handling
                        return false;
                    }
                }
            }
            MinerDirective::StopTenure => match self.stop_tenure() {
                Ok(()) => {
                    debug!("Relayer: successfully stopped tenure.");
                }
                Err(e) => {
                    error!("Relayer: Failed to stop tenure: {e:?}");
                }
            },
        }

        // Count the processed directive for test/metrics visibility.
        self.globals.counters.bump_naka_miner_directives();
        true
    }
1746

1747
    /// Test-only: returns true when the test harness has directed the relayer to skip
    /// submitting block-commits (via the shared `skip_commit_op` flag).
    #[cfg(test)]
    fn fault_injection_skip_block_commit(&self) -> bool {
        self.globals.counters.skip_commit_op.get()
    }
1751

1752
    /// Production build: block-commit submission is never skipped.
    #[cfg(not(test))]
    fn fault_injection_skip_block_commit(&self) -> bool {
        false
    }
1756

1757
    /// Get the canonical tip for the miner to commit to.
    /// This is provided as a separate function so that it can be overridden for testing.
    #[cfg(not(test))]
    fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> {
        // Production build: never override the commit tip.
        None
    }
1763

1764
    /// Test-only: returns a test-injected (consensus hash, block hash) commit tip,
    /// if one was set via `TEST_MINER_COMMIT_TIP`; otherwise `None`.
    #[cfg(test)]
    fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> {
        TEST_MINER_COMMIT_TIP.get()
    }
1768

1769
    fn get_commit_for_tip(&mut self) -> Result<(ConsensusHash, BlockHeaderHash), DbError> {
2,906✔
1770
        if let Some((consensus_hash, block_header_hash)) = self.fault_injection_get_tip_for_commit()
2,906✔
1771
        {
1772
            info!("Relayer: using test tip for commit";
104✔
1773
                "consensus_hash" => %consensus_hash,
1774
                "block_header_hash" => %block_header_hash,
1775
            );
1776
            Ok((consensus_hash, block_header_hash))
104✔
1777
        } else {
1778
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
2,802✔
1779
        }
1780
    }
2,906✔
1781

1782
    /// Generate and submit the next block-commit, and record it locally
    ///
    /// Skips submission entirely under test fault injection.  Panics if the canonical
    /// Stacks tip cannot be loaded (treated as fatal).  Under mock-mining, a failed
    /// bitcoin submission is tolerated and reported as success.
    fn issue_block_commit(&mut self) -> Result<(), NakamotoNodeError> {
        if self.fault_injection_skip_block_commit() {
            debug!(
                "Relayer: not submitting block-commit to bitcoin network due to test directive."
            );
            return Ok(());
        }
        // Deliberate panic: failure to read the canonical Stacks tip from the
        // sortition DB is treated as a fatal condition.
        let (tip_block_ch, tip_block_bh) = self.get_commit_for_tip().unwrap_or_else(|e| {
            panic!("Failed to load canonical stacks tip: {e:?}");
        });
        let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?;

        // Load the tip's Stacks height for logging and commit bookkeeping.
        let Some(tip_height) = NakamotoChainState::get_block_header(
            self.chainstate.db(),
            &StacksBlockId::new(&tip_block_ch, &tip_block_bh),
        )
        .map_err(|e| {
            warn!("Relayer: failed to load tip {tip_block_ch}/{tip_block_bh}: {e:?}");
            NakamotoNodeError::ParentNotFound
        })?
        .map(|header| header.stacks_block_height) else {
            warn!(
                "Relayer: failed to load height for tip {tip_block_ch}/{tip_block_bh} (got None)"
            );
            return Err(NakamotoNodeError::ParentNotFound);
        };

        // sign and broadcast
        let mut op_signer = self.keychain.generate_op_signer();
        let res = self.bitcoin_controller.submit_operation(
            *last_committed.get_epoch_id(),
            BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()),
            &mut op_signer,
        );
        let txid = match res {
            Ok(txid) => txid,
            Err(e) => {
                // Mock miners have no real bitcoin wallet; treat submission failure as benign.
                if self.config.node.mock_mining {
                    debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction");
                    return Ok(());
                }
                warn!("Failed to submit block-commit bitcoin transaction: {e}");
                return Err(NakamotoNodeError::BurnchainSubmissionFailed(e));
            }
        };

        info!(
            "Relayer: Submitted block-commit";
            "tip_consensus_hash" => %tip_block_ch,
            "tip_block_hash" => %tip_block_bh,
            "tip_height" => %tip_height,
            "tip_block_id" => %StacksBlockId::new(&tip_block_ch, &tip_block_bh),
            "txid" => %txid,
        );

        // update local state
        last_committed.set_txid(&txid);
        self.globals.counters.bump_naka_submitted_commits(
            last_committed.burn_tip.block_height,
            tip_height,
            last_committed.block_commit.burn_fee,
            &last_committed.tenure_consensus_hash,
        );
        // Remember this commit so `initiative()` can detect when an RBF is needed.
        self.last_committed = Some(last_committed);

        Ok(())
    }
1850

1851
    /// Determine what the relayer should do to advance the chain.
1852
    /// * If this isn't a miner, then it's always nothing.
1853
    /// * Otherwise, if we haven't done so already, go register a VRF public key
1854
    /// * If the stacks chain tip or burnchain tip has changed, then issue a block-commit
1855
    /// * If the last burn view we started a miner for is not the canonical burn view, then
1856
    /// try and start a new tenure (or continue an existing one).
1857
    fn initiative(&mut self) -> Result<Option<RelayerDirective>, NakamotoNodeError> {
277,744✔
1858
        if !self.is_miner {
277,744✔
1859
            return Ok(None);
72,795✔
1860
        }
204,949✔
1861

1862
        match self.globals.get_leader_key_registration_state() {
204,949✔
1863
            // do we need a VRF key registration?
1864
            LeaderKeyRegistrationState::Inactive => {
1865
                let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())?;
38✔
1866
                return Ok(Some(RelayerDirective::RegisterKey(sort_tip)));
38✔
1867
            }
1868
            // are we still waiting on a pending registration?
1869
            LeaderKeyRegistrationState::Pending(..) => {
1870
                return Ok(None);
4,250✔
1871
            }
1872
            LeaderKeyRegistrationState::Active(_) => {}
200,661✔
1873
        };
1874

1875
        // load up canonical sortition and stacks tips
1876
        let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())?;
200,661✔
1877

1878
        // NOTE: this may be an epoch2x tip
1879
        let (stacks_tip_ch, stacks_tip_bh) =
200,661✔
1880
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())?;
200,661✔
1881
        let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh);
200,661✔
1882

1883
        // check stacks and sortition tips to see if any chainstate change has happened.
1884
        // did our view of the sortition history change?
1885
        // if so, then let's try and confirm the highest tenure so far.
1886
        let burnchain_changed = self
200,661✔
1887
            .last_committed
200,661✔
1888
            .as_ref()
200,661✔
1889
            .map(|cmt| cmt.get_burn_tip().consensus_hash != sort_tip.consensus_hash)
200,661✔
1890
            .unwrap_or(true);
200,661✔
1891

1892
        let highest_tenure_changed = self
200,661✔
1893
            .last_committed
200,661✔
1894
            .as_ref()
200,661✔
1895
            .map(|cmt| cmt.get_tenure_id() != &stacks_tip_ch)
200,661✔
1896
            .unwrap_or(true);
200,661✔
1897

1898
        debug!("Relayer: initiative to commit";
200,661✔
1899
               "sortititon tip" => %sort_tip.consensus_hash,
1900
               "stacks tip" => %stacks_tip,
1901
               "stacks_tip_ch" => %stacks_tip_ch,
1902
               "stacks_tip_bh" => %stacks_tip_bh,
1903
               "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_tip().consensus_hash.to_string()).unwrap_or("(not set)".to_string()),
×
1904
               "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()),
×
1905
               "burnchain view changed?" => %burnchain_changed,
1906
               "highest tenure changed?" => %highest_tenure_changed);
1907

1908
        // If the miner spend or config has changed, we want to RBF with new config values.
1909
        let (burnchain_config_changed, _) = self.check_burnchain_config_changed();
200,661✔
1910
        let miner_config_changed = self.check_miner_config_changed();
200,661✔
1911

1912
        if burnchain_config_changed || miner_config_changed {
200,661✔
1913
            info!("Miner spend or config changed; issuing block commit with new values";
2✔
1914
                "miner_spend_changed" => %burnchain_config_changed,
1915
                "miner_config_changed" => %miner_config_changed,
1916
            );
1917
            return Ok(Some(RelayerDirective::IssueBlockCommit(
2✔
1918
                stacks_tip_ch,
2✔
1919
                stacks_tip_bh,
2✔
1920
            )));
2✔
1921
        }
200,659✔
1922

1923
        if !burnchain_changed && !highest_tenure_changed {
200,659✔
1924
            // nothing to do
1925
            return Ok(None);
120,099✔
1926
        }
80,560✔
1927

1928
        if highest_tenure_changed {
80,560✔
1929
            // highest-tenure view changed, so we need to send (or RBF) a commit
1930
            return Ok(Some(RelayerDirective::IssueBlockCommit(
35,406✔
1931
                stacks_tip_ch,
35,406✔
1932
                stacks_tip_bh,
35,406✔
1933
            )));
35,406✔
1934
        }
45,154✔
1935

1936
        debug!("Relayer: burnchain view changed, but highest tenure did not");
45,154✔
1937
        // First, check if the changed burnchain view includes any
1938
        // sortitions. If it doesn't submit a block commit immediately.
1939
        //
1940
        // If it does, then wait a bit for the first block in the new
1941
        // tenure to arrive. This is to avoid submitting a block
1942
        // commit that will be immediately RBFed when the first
1943
        // block arrives.
1944
        if let Some(last_committed) = self.last_committed.as_ref() {
45,154✔
1945
            // check if all the sortitions after `last_tenure` are empty sortitions. if they are,
1946
            //  we don't need to wait at all to submit a commit
1947
            let last_tenure_tip_height = SortitionDB::get_consensus_hash_height(
45,154✔
1948
                &self.sortdb,
45,154✔
1949
                last_committed.get_tenure_id(),
45,154✔
1950
            )?
×
1951
            .ok_or_else(|| NakamotoNodeError::ParentNotFound)?;
45,154✔
1952
            let no_sortitions_after_last_tenure = self
45,154✔
1953
                .sortdb
45,154✔
1954
                .find_in_canonical::<_, _, NakamotoNodeError>(|cursor| {
47,663✔
1955
                    if cursor.block_height <= last_tenure_tip_height {
47,663✔
1956
                        return Ok(FindIter::Halt);
71✔
1957
                    }
47,592✔
1958
                    if cursor.sortition {
47,592✔
1959
                        return Ok(FindIter::Found(()));
45,083✔
1960
                    }
2,509✔
1961
                    Ok(FindIter::Continue)
2,509✔
1962
                })?
47,663✔
1963
                .is_none();
45,154✔
1964
            if no_sortitions_after_last_tenure {
45,154✔
1965
                return Ok(Some(RelayerDirective::IssueBlockCommit(
71✔
1966
                    stacks_tip_ch,
71✔
1967
                    stacks_tip_bh,
71✔
1968
                )));
71✔
1969
            }
45,083✔
1970
        }
×
1971

1972
        if self.new_tenure_timeout.is_ready(
45,083✔
1973
            &sort_tip.consensus_hash,
45,083✔
1974
            &self.config.miner.block_commit_delay,
45,083✔
1975
        ) {
1976
            return Ok(Some(RelayerDirective::IssueBlockCommit(
7,573✔
1977
                stacks_tip_ch,
7,573✔
1978
                stacks_tip_bh,
7,573✔
1979
            )));
7,573✔
1980
        } else {
1981
            if let Some(deadline) = self
37,510✔
1982
                .new_tenure_timeout
37,510✔
1983
                .deadline(&self.config.miner.block_commit_delay)
37,510✔
1984
            {
37,510✔
1985
                self.next_initiative = std::cmp::min(self.next_initiative, deadline);
37,510✔
1986
            }
37,510✔
1987

1988
            return Ok(None);
37,510✔
1989
        }
1990
    }
277,744✔
1991

1992
    /// Try to start up a tenure-extend if the tenure_extend_time has expired.
1993
    ///
1994
    /// Will check if the tenure-extend time was set and has expired. If so, will
1995
    /// check if the current miner thread needs to issue a BlockFound or if it can
1996
    /// immediately tenure-extend.
1997
    ///
1998
    /// Note: tenure_extend_time is only set to Some(_) if during sortition processing, the sortition
1999
    /// winner commit is corrupted or the winning miner has yet to produce a block.
2000
    fn check_tenure_timers(&mut self) {
337,147✔
2001
        // Should begin a tenure-extend?
2002
        let Some(tenure_extend_time) = self.tenure_extend_time.clone() else {
337,147✔
2003
            // No tenure extend time set, so nothing to do.
2004
            return;
314,603✔
2005
        };
2006
        if !tenure_extend_time.should_extend() {
22,544✔
2007
            test_debug!(
22,255✔
2008
                "Relayer: will not try to tenure-extend yet ({} <= {})",
2009
                tenure_extend_time.elapsed().as_secs(),
×
2010
                tenure_extend_time.timeout().as_secs()
×
2011
            );
2012
            return;
22,255✔
2013
        }
289✔
2014

2015
        let Some(mining_pkh) = self.get_mining_key_pkh() else {
289✔
2016
            // This shouldn't really ever hit, but just in case.
2017
            warn!("Will not tenure extend -- no mining key");
×
2018
            // If we don't have a mining key set, don't bother checking again.
2019
            self.tenure_extend_time = None;
×
2020
            return;
×
2021
        };
2022
        // reset timer so we can try again if for some reason a miner was already running (e.g. a
2023
        // blockfound from earlier).
2024
        self.tenure_extend_time
289✔
2025
            .as_mut()
289✔
2026
            .map(|t| t.refresh(self.config.miner.tenure_extend_poll_timeout));
289✔
2027
        // try to extend, but only if we aren't already running a thread for the current or newer
2028
        // burnchain view
2029
        let Ok(burn_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
289✔
2030
            .inspect_err(|e| {
289✔
2031
                error!("Failed to read canonical burnchain sortition: {e:?}");
×
2032
            })
×
2033
        else {
2034
            return;
×
2035
        };
2036

2037
        if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() {
289✔
2038
            // a miner thread is already running.  If its burn view is the same as the canonical
2039
            // tip, then do nothing for now
2040
            if burn_tip.consensus_hash == miner_thread_burn_view.consensus_hash {
271✔
2041
                info!("Will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %burn_tip.consensus_hash);
×
2042
                // Do not reset the timer, as we may be able to extend later.
2043
                return;
×
2044
            }
271✔
2045
        }
18✔
2046

2047
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
289✔
2048
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
289✔
2049
                .expect("FATAL: failed to query sortition DB for stacks tip");
289✔
2050
        let canonical_stacks_tip =
289✔
2051
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
289✔
2052
        let canonical_stacks_snapshot =
289✔
2053
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
289✔
2054
                .expect("FATAL: failed to query sortiiton DB for epoch")
289✔
2055
                .expect("FATAL: no sortition for canonical stacks tip");
289✔
2056

2057
        match tenure_extend_time.reason() {
289✔
2058
            TenureExtendReason::BadSortitionWinner | TenureExtendReason::EmptySortition => {
2059
                // Before we try to extend, check if we need to issue a BlockFound
2060
                let Ok(last_winning_snapshot) =
282✔
2061
                    Self::get_last_winning_snapshot(&self.sortdb, &burn_tip).inspect_err(|e| {
282✔
2062
                        warn!("Failed to load last winning snapshot: {e:?}");
×
2063
                    })
×
2064
                else {
2065
                    // this should be unreachable, but don't tempt fate.
2066
                    info!("No prior snapshots have a winning sortition. Will not try to mine.");
×
2067
                    self.tenure_extend_time = None;
×
2068
                    return;
×
2069
                };
2070
                let won_last_winning_snapshot =
282✔
2071
                    last_winning_snapshot.miner_pk_hash.as_ref() == Some(&mining_pkh);
282✔
2072
                if won_last_winning_snapshot
282✔
2073
                    && Self::need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot)
271✔
2074
                {
2075
                    info!("Will not tenure extend yet -- need to issue a BlockFound first");
252✔
2076
                    // We may manage to extend later, so don't set the timer to None.
2077
                    return;
252✔
2078
                }
30✔
2079
            }
2080
            TenureExtendReason::UnresponsiveWinner => {}
7✔
2081
        }
2082

2083
        let won_ongoing_tenure_sortition =
37✔
2084
            canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(&mining_pkh);
37✔
2085
        if !won_ongoing_tenure_sortition {
37✔
2086
            debug!("Will not tenure extend. Did not win ongoing tenure sortition";
2✔
2087
                "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash,
2088
                "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch,
2089
                "burn_chain_sortition_tip_mining_pk" => ?burn_tip.miner_pk_hash,
2090
                "mining_pk" => %mining_pkh
2091
            );
2092
            self.tenure_extend_time = None;
2✔
2093
            return;
2✔
2094
        }
35✔
2095
        // If we reach this code, we have either won the last winning snapshot and have already issued a block found for it and should extend.
2096
        // OR we did not win the last snapshot, but the person who did has failed to produce a block and we should extend our old tenure.
2097
        if let Err(e) = self.stop_tenure() {
35✔
2098
            error!("Relayer: Failed to stop tenure: {e:?}");
×
2099
            return;
×
2100
        }
35✔
2101
        let reason = MinerReason::Extended {
35✔
2102
            burn_view_consensus_hash: burn_tip.consensus_hash.clone(),
35✔
2103
        };
35✔
2104
        debug!("Relayer: successfully stopped tenure; will try to continue.");
35✔
2105
        if let Err(e) = self.start_new_tenure(
35✔
2106
            canonical_stacks_tip.clone(),
35✔
2107
            canonical_stacks_snapshot.clone(),
35✔
2108
            burn_tip.clone(),
35✔
2109
            reason.clone(),
35✔
2110
            &burn_tip.consensus_hash,
35✔
2111
        ) {
35✔
2112
            error!("Relayer: Failed to start new tenure: {e:?}");
×
2113
        } else {
2114
            debug!("Relayer: successfully started new tenure.";
35✔
2115
                   "parent_tenure_start" => %canonical_stacks_tip,
2116
                   "burn_tip" => %burn_tip.consensus_hash,
2117
                   "burn_view_snapshot" => %burn_tip.consensus_hash,
2118
                   "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash,
2119
                   "reason" => %reason);
2120
            self.tenure_extend_time = None;
35✔
2121
        }
2122
    }
337,147✔
2123

2124
    /// Main loop of the relayer.
    /// Runs in a separate thread.
    /// Continuously receives from `relay_rcv`.
    /// Wakes up once per second to see if we need to continue mining an ongoing tenure.
    ///
    /// Each pass: (1) service tenure-extend timers, (2) run `initiative()` if the
    /// initiative flag was raised or the deadline passed, (3) otherwise block on
    /// the directive channel (bounded by `poll_frequency_ms`), (4) dispatch any
    /// directive obtained. Exits when `keep_running()` turns false or a directive
    /// handler returns false; on exit it blocks the miner and signals other
    /// threads to stop.
    pub fn main(mut self, relay_rcv: Receiver<RelayerDirective>) {
        debug!("relayer thread ID is {:?}", std::thread::current().id());

        // Schedule the first self-driven `initiative()` pass.
        self.next_initiative =
            Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay);

        // how often we perform a loop pass below
        let poll_frequency_ms = 1_000;

        while self.globals.keep_running() {
            self.check_tenure_timers();
            let raised_initiative = self.globals.take_initiative();
            let timed_out = Instant::now() >= self.next_initiative;
            // Prefer a self-generated directive (from initiative()) over the channel.
            let initiative_directive = if raised_initiative.is_some() || timed_out {
                // Reset the deadline before running, so the next pass is scheduled
                // relative to now.
                self.next_initiative =
                    Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay);
                self.initiative()
                    .inspect_err(|e| {
                        error!("Error while getting directive from initiative()"; "err" => ?e);
                    })
                    .ok()
                    .flatten()
            } else {
                None
            };

            let directive_opt = initiative_directive.or_else(|| {
                // do a time-bound recv on the relayer channel so that we can hit the `initiative()` invocation
                //  and keep_running() checks on each loop iteration
                match relay_rcv.recv_timeout(Duration::from_millis(poll_frequency_ms)) {
                    Ok(directive) => {
                        // only do this once, so we can call .initiative() again
                        Some(directive)
                    }
                    Err(RecvTimeoutError::Timeout) => None,
                    Err(RecvTimeoutError::Disconnected) => {
                        // Sender side is gone; shut down cleanly via the Exit directive.
                        warn!("Relayer receive channel disconnected. Exiting relayer thread");
                        Some(RelayerDirective::Exit)
                    }
                }
            });

            if let Some(directive) = directive_opt {
                debug!("Relayer: main loop directive";
                       "directive" => %directive,
                       "raised_initiative" => ?raised_initiative,
                       "timed_out" => %timed_out);

                // handle_directive() returns false only for Exit.
                if !self.handle_directive(directive) {
                    break;
                }
            }
        }

        // kill miner if it's running
        signal_mining_blocked(self.globals.get_miner_status());

        // set termination flag so other threads die
        self.globals.signal_stop();

        debug!("Relayer exit!");
    }
2190

2191
    /// Try loading up a saved VRF key
2192
    pub(crate) fn load_saved_vrf_key(path: &str, pubkey_hash: &Hash160) -> Option<RegisteredKey> {
38✔
2193
        let mut f = match fs::File::open(path) {
38✔
2194
            Ok(f) => f,
38✔
2195
            Err(e) => {
×
2196
                warn!("Could not open {path}: {e:?}");
×
2197
                return None;
×
2198
            }
2199
        };
2200
        let mut registered_key_bytes = vec![];
38✔
2201
        if let Err(e) = f.read_to_end(&mut registered_key_bytes) {
38✔
2202
            warn!("Failed to read registered key bytes from {path}: {e:?}");
×
2203
            return None;
×
2204
        }
38✔
2205

2206
        let Ok(registered_key) = serde_json::from_slice::<RegisteredKey>(&registered_key_bytes)
38✔
2207
        else {
2208
            warn!("Did not load registered key from {path}: could not decode JSON");
×
2209
            return None;
×
2210
        };
2211

2212
        // Check that the loaded key's memo matches the current miner's key
2213
        if registered_key.memo != pubkey_hash.as_ref() {
38✔
2214
            warn!("Loaded VRF key does not match mining key");
37✔
2215
            return None;
37✔
2216
        }
1✔
2217

2218
        info!("Loaded registered key from {path}");
1✔
2219
        Some(registered_key)
1✔
2220
    }
38✔
2221

2222
    /// Top-level dispatcher
    ///
    /// Handles one [`RelayerDirective`] and returns whether the main loop should
    /// keep running (`false` only for `Exit`). Miner-only directives
    /// (`RegisterKey`, `ProcessedBurnBlock`, `IssueBlockCommit`) are no-ops when
    /// this node is not a miner or is still in initial block download.
    pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool {
        debug!("Relayer: handling directive"; "directive" => %directive);
        let continue_running = match directive {
            // Network results are processed unconditionally (miner or not).
            RelayerDirective::HandleNetResult(net_result) => {
                self.process_network_result(net_result);
                true
            }
            // RegisterKey directives mean that the relayer should try to register a new VRF key.
            // These are triggered by the relayer waking up without an active VRF key.
            RelayerDirective::RegisterKey(last_burn_block) => {
                if !self.is_miner {
                    return true;
                }
                if self.globals.in_initial_block_download() {
                    info!("In initial block download, will not submit VRF registration");
                    return true;
                }
                // Prefer resuming a previously-activated VRF key from disk, if one
                // was saved and matches our current mining key.
                let mut saved_key_opt = None;
                if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() {
                    saved_key_opt =
                        Self::load_saved_vrf_key(path, &self.keychain.get_nakamoto_pkh());
                }
                if let Some(saved_key) = saved_key_opt {
                    debug!("Relayer: resuming VRF key");
                    self.globals.resume_leader_key(saved_key);
                } else {
                    // No usable saved key: rotate to a fresh VRF key and submit a
                    // registration for it.
                    self.rotate_vrf_and_register(&last_burn_block);
                    debug!("Relayer: directive Registered VRF key");
                }
                self.globals.counters.bump_blocks_processed();
                true
            }
            // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring.
            //  relayer should invoke `handle_sortition` to determine if they won the sortition,
            //  and to start their miner, or stop their miner if an active tenure is now ending
            RelayerDirective::ProcessedBurnBlock(consensus_hash, burn_hash, block_header_hash) => {
                if !self.is_miner {
                    return true;
                }
                if self.globals.in_initial_block_download() {
                    debug!("In initial block download, will not check sortition for miner");
                    return true;
                }
                self.handle_sortition(
                    consensus_hash,
                    burn_hash,
                    StacksBlockId(block_header_hash.0),
                )
            }
            // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block
            RelayerDirective::IssueBlockCommit(..) => {
                if !self.is_miner {
                    return true;
                }
                if self.globals.in_initial_block_download() {
                    debug!("In initial block download, will not issue block commit");
                    return true;
                }
                // A failed commit is logged but does not stop the relayer.
                if let Err(e) = self.issue_block_commit() {
                    warn!("Relayer failed to issue block commit"; "err" => ?e);
                }
                true
            }
            RelayerDirective::Exit => false,
        };
        debug!("Relayer: handled directive"; "continue_running" => continue_running);
        continue_running
    }
2291

2292
    /// Reload config.burnchain to see if burn_fee_cap has changed.
2293
    /// If it has, update the miner spend amount and return true.
2294
    pub fn check_burnchain_config_changed(&self) -> (bool, BurnchainConfig) {
203,566✔
2295
        let burnchain_config = self.config.get_burnchain_config();
203,566✔
2296
        let last_burnchain_config_opt = self.globals.get_last_burnchain_config();
203,566✔
2297
        let burnchain_config_changed =
203,566✔
2298
            if let Some(last_burnchain_config) = last_burnchain_config_opt {
203,566✔
2299
                last_burnchain_config != burnchain_config
203,329✔
2300
            } else {
2301
                false
237✔
2302
            };
2303

2304
        self.globals
203,566✔
2305
            .set_last_miner_spend_amount(burnchain_config.burn_fee_cap);
203,566✔
2306
        self.globals
203,566✔
2307
            .set_last_burnchain_config(burnchain_config.clone());
203,566✔
2308

2309
        set_mining_spend_amount(
203,566✔
2310
            self.globals.get_miner_status(),
203,566✔
2311
            burnchain_config.burn_fee_cap,
203,566✔
2312
        );
2313

2314
        (burnchain_config_changed, burnchain_config)
203,566✔
2315
    }
203,566✔
2316

2317
    pub fn check_miner_config_changed(&self) -> bool {
200,660✔
2318
        let miner_config = self.config.get_miner_config();
200,660✔
2319
        let last_miner_config_opt = self.globals.get_last_miner_config();
200,660✔
2320
        let miner_config_changed = if let Some(last_miner_config) = last_miner_config_opt {
200,660✔
2321
            last_miner_config != miner_config
200,423✔
2322
        } else {
2323
            false
237✔
2324
        };
2325

2326
        self.globals.set_last_miner_config(miner_config);
200,660✔
2327

2328
        miner_config_changed
200,660✔
2329
    }
200,660✔
2330
}
2331

2332
#[cfg(test)]
pub mod test {
    use std::fs::File;
    use std::io::Write;
    use std::path::Path;
    use std::time::Duration;

    use rand::{thread_rng, Rng};
    use stacks::burnchains::Txid;
    use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash};
    use stacks::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, TrieHash};
    use stacks::util::hash::Hash160;
    use stacks::util::secp256k1::Secp256k1PublicKey;
    use stacks::util::vrf::VRFPublicKey;

    use super::{BurnBlockCommitTimer, RelayerThread};
    use crate::nakamoto_node::save_activated_vrf_key;
    use crate::run_loop::RegisteredKey;
    use crate::Keychain;

    /// Loading a VRF key from a nonexistent path must fail gracefully (return
    /// `None`) rather than panic.
    #[test]
    fn load_nonexistent_vrf_key() {
        let keychain = Keychain::default(vec![0u8; 32]);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        let pubkey_hash = Hash160::from_node_public_key(&pk);

        // Make sure the file really is absent before the load attempt.
        let path = "/tmp/does_not_exist.json";
        _ = std::fs::remove_file(path);

        let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash);
        assert!(res.is_none());
    }

    /// An empty key file is not valid JSON, so loading must return `None`.
    #[test]
    fn load_empty_vrf_key() {
        let keychain = Keychain::default(vec![0u8; 32]);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        let pubkey_hash = Hash160::from_node_public_key(&pk);

        let path = "/tmp/empty.json";
        File::create(path).expect("Failed to create test file");
        assert!(Path::new(path).exists());

        let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash);
        assert!(res.is_none());

        std::fs::remove_file(path).expect("Failed to delete test file");
    }

    /// Valid JSON that does not decode to a `RegisteredKey` must be rejected.
    #[test]
    fn load_bad_vrf_key() {
        let keychain = Keychain::default(vec![0u8; 32]);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        let pubkey_hash = Hash160::from_node_public_key(&pk);

        let path = "/tmp/invalid_saved_key.json";
        let json_content = r#"{ "hello": "world" }"#;

        // Write the JSON content to the file
        let mut file = File::create(path).expect("Failed to create test file");
        file.write_all(json_content.as_bytes())
            .expect("Failed to write to test file");
        assert!(Path::new(path).exists());

        let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash);
        assert!(res.is_none());

        std::fs::remove_file(path).expect("Failed to delete test file");
    }

    /// Round-trip: a key saved with `save_activated_vrf_key` must load back
    /// when the memo matches the miner's pubkey hash.
    ///
    /// Uses a path unique to this test: Rust runs tests in parallel by default,
    /// so sharing a temp file with another test would be racy.
    #[test]
    fn save_load_vrf_key() {
        let keychain = Keychain::default(vec![0u8; 32]);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        let pubkey_hash = Hash160::from_node_public_key(&pk);
        let key = RegisteredKey {
            target_block_height: 101,
            block_height: 102,
            op_vtxindex: 1,
            vrf_public_key: VRFPublicKey::from_hex(
                "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71",
            )
            .unwrap(),
            memo: pubkey_hash.as_ref().to_vec(),
        };
        let path = "/tmp/save_load_vrf_key.json";
        save_activated_vrf_key(path, &key);

        let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash);
        assert!(res.is_some());

        std::fs::remove_file(path).expect("Failed to delete test file");
    }

    /// A saved key whose memo was produced by a *different* keychain must be
    /// rejected on load.
    ///
    /// Uses a path unique to this test to avoid racing `save_load_vrf_key`
    /// under the parallel test runner.
    #[test]
    fn invalid_saved_memo() {
        let keychain = Keychain::default(vec![0u8; 32]);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        let pubkey_hash = Hash160::from_node_public_key(&pk);
        let key = RegisteredKey {
            target_block_height: 101,
            block_height: 102,
            op_vtxindex: 1,
            vrf_public_key: VRFPublicKey::from_hex(
                "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71",
            )
            .unwrap(),
            memo: pubkey_hash.as_ref().to_vec(),
        };
        let path = "/tmp/invalid_saved_memo_vrf_key.json";
        save_activated_vrf_key(path, &key);

        // Load with a pubkey hash derived from a different secret key.
        let keychain = Keychain::default(vec![1u8; 32]);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        let pubkey_hash = Hash160::from_node_public_key(&pk);

        let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash);
        assert!(res.is_none());

        std::fs::remove_file(path).expect("Failed to delete test file");
    }

    /// Exercises `RelayerThread::need_block_found` over the combinations of
    /// snapshot heights and consensus hashes that decide whether a BlockFound
    /// must be issued before extending.
    #[test]
    fn check_need_block_found() {
        let consensus_hash_byte = thread_rng().gen();
        // Note: `[thread_rng().gen(); 32]` samples ONE byte and repeats it --
        // fine for these tests, which only care about equality relationships.
        let canonical_stacks_snapshot = BlockSnapshot {
            block_height: thread_rng().gen::<u64>().wrapping_add(1), // Add one to ensure we can always decrease by 1 without underflowing.
            burn_header_timestamp: thread_rng().gen(),
            burn_header_hash: BurnchainHeaderHash([thread_rng().gen(); 32]),
            consensus_hash: ConsensusHash([consensus_hash_byte; 20]),
            parent_burn_header_hash: BurnchainHeaderHash([thread_rng().gen(); 32]),
            ops_hash: OpsHash([thread_rng().gen(); 32]),
            total_burn: thread_rng().gen(),
            sortition: true,
            sortition_hash: SortitionHash([thread_rng().gen(); 32]),
            winning_block_txid: Txid([thread_rng().gen(); 32]),
            winning_stacks_block_hash: BlockHeaderHash([thread_rng().gen(); 32]),
            index_root: TrieHash([thread_rng().gen(); 32]),
            num_sortitions: thread_rng().gen(),
            stacks_block_accepted: true,
            stacks_block_height: thread_rng().gen(),
            arrival_index: thread_rng().gen(),
            canonical_stacks_tip_consensus_hash: ConsensusHash([thread_rng().gen(); 20]),
            canonical_stacks_tip_hash: BlockHeaderHash([thread_rng().gen(); 32]),
            canonical_stacks_tip_height: thread_rng().gen(),
            sortition_id: SortitionId([thread_rng().gen(); 32]),
            parent_sortition_id: SortitionId([thread_rng().gen(); 32]),
            pox_valid: true,
            accumulated_coinbase_ustx: thread_rng().gen::<u64>() as u128,
            miner_pk_hash: Some(Hash160([thread_rng().gen(); 20])),
        };

        // The consensus_hashes are the same, and the block heights are the same. Therefore, don't need a block found.
        let last_winning_block_snapshot = canonical_stacks_snapshot.clone();
        assert!(!RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &last_winning_block_snapshot
        ));

        // The block height of the canonical tip is higher than the last winning snapshot. We already issued a block found.
        let mut canonical_stacks_snapshot_is_higher_than_last_winning_snapshot =
            last_winning_block_snapshot.clone();
        canonical_stacks_snapshot_is_higher_than_last_winning_snapshot.block_height =
            canonical_stacks_snapshot.block_height.saturating_sub(1);
        assert!(!RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &canonical_stacks_snapshot_is_higher_than_last_winning_snapshot
        ));

        // The block height is the same, but we have different consensus hashes. We need to issue a block found.
        let mut tip_consensus_hash_mismatch = last_winning_block_snapshot.clone();
        tip_consensus_hash_mismatch.consensus_hash =
            ConsensusHash([consensus_hash_byte.wrapping_add(1); 20]);
        assert!(RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &tip_consensus_hash_mismatch
        ));

        // The block height of the canonical tip is lower than the last winning snapshot blockheight. We need to issue a block found.
        let mut canonical_stacks_snapshot_is_lower_than_last_winning_snapshot =
            last_winning_block_snapshot.clone();
        canonical_stacks_snapshot_is_lower_than_last_winning_snapshot.block_height =
            canonical_stacks_snapshot.block_height.saturating_add(1);
        assert!(RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &canonical_stacks_snapshot_is_lower_than_last_winning_snapshot
        ));
    }

    /// Unit-tests the `BurnBlockCommitTimer` state machine: unset timers report
    /// zero elapsed time, `is_ready` arms the timer on first sight of a burn
    /// tip, becomes true once the delay elapses, and re-arms whenever the burn
    /// tip changes.
    #[test]
    fn burn_block_commit_timer_units() {
        let mut burn_block_timer = BurnBlockCommitTimer::NotSet;
        assert_eq!(burn_block_timer.elapsed_secs(), 0);

        let ch_0 = ConsensusHash([0; 20]);
        let ch_1 = ConsensusHash([1; 20]);
        let ch_2 = ConsensusHash([2; 20]);

        // First sighting of ch_0 arms the timer but is not yet ready.
        assert!(!burn_block_timer.is_ready(&ch_0, &Duration::from_secs(1)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_0);

        std::thread::sleep(Duration::from_secs(1));

        // With a zero delay and the same tip, the timer is now ready.
        assert!(burn_block_timer.is_ready(&ch_0, &Duration::from_secs(0)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_0);

        // A new burn tip re-arms the timer, so it is not ready even with a zero delay.
        assert!(!burn_block_timer.is_ready(&ch_1, &Duration::from_secs(0)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_1);

        // u64::MAX delay can never elapse.
        assert!(!burn_block_timer.is_ready(&ch_1, &Duration::from_secs(u64::MAX)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_1);

        std::thread::sleep(Duration::from_secs(1));
        // Even after waiting, a different tip re-arms the timer again.
        assert!(!burn_block_timer.is_ready(&ch_2, &Duration::from_secs(0)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_2);
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc