• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 23509724014

24 Mar 2026 08:02PM UTC coverage: 85.676% (-0.04%) from 85.712%
23509724014

Pull #7031

github

2faee1
web-flow
Merge 8b0e95be4 into 1e36cefa9
Pull Request #7031: Updated ci.yml to have a 'v2' of code coverage action, which is more robust, is skipped when prior steps are skipped, and uses env vars instead of hard-coding

186502 of 217682 relevant lines covered (85.68%)

17190821.87 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

85.83
/stacks-node/src/nakamoto_node/relayer.rs
1
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
2
// Copyright (C) 2020-2023 Stacks Open Internet Foundation
3
//
4
// This program is free software: you can redistribute it and/or modify
5
// it under the terms of the GNU General Public License as published by
6
// the Free Software Foundation, either version 3 of the License, or
7
// (at your option) any later version.
8
//
9
// This program is distributed in the hope that it will be useful,
10
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
// GNU General Public License for more details.
13
//
14
// You should have received a copy of the GNU General Public License
15
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
16
use core::fmt;
17
use std::io::Read;
18
use std::sync::atomic::{AtomicBool, Ordering};
19
use std::sync::mpsc::{Receiver, RecvTimeoutError};
20
use std::sync::Arc;
21
#[cfg(test)]
22
use std::sync::LazyLock;
23
use std::thread::JoinHandle;
24
use std::time::{Duration, Instant};
25
use std::{fs, thread};
26

27
use rand::{thread_rng, Rng};
28
use stacks::burnchains::{Burnchain, Txid};
29
use stacks::chainstate::burn::db::sortdb::{FindIter, SortitionDB};
30
use stacks::chainstate::burn::operations::leader_block_commit::{
31
    RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS,
32
};
33
use stacks::chainstate::burn::operations::{
34
    BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp,
35
};
36
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
37
use stacks::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients;
38
use stacks::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState};
39
use stacks::chainstate::stacks::address::PoxAddress;
40
use stacks::chainstate::stacks::db::StacksChainState;
41
use stacks::chainstate::stacks::miner::{
42
    set_mining_spend_amount, signal_mining_blocked, signal_mining_ready,
43
};
44
use stacks::chainstate::stacks::Error as ChainstateError;
45
use stacks::config::BurnchainConfig;
46
use stacks::core::mempool::MemPoolDB;
47
use stacks::core::STACKS_EPOCH_LATEST_MARKER;
48
use stacks::monitoring::increment_stx_blocks_mined_counter;
49
use stacks::net::db::LocalPeer;
50
use stacks::net::p2p::NetworkHandle;
51
use stacks::net::relay::Relayer;
52
use stacks::net::NetworkResult;
53
use stacks::util_lib::db::Error as DbError;
54
use stacks_common::types::chainstate::{
55
    BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed,
56
};
57
use stacks_common::types::StacksEpochId;
58
use stacks_common::util::get_epoch_time_ms;
59
use stacks_common::util::hash::Hash160;
60
#[cfg(test)]
61
use stacks_common::util::tests::TestFlag;
62
use stacks_common::util::vrf::VRFPublicKey;
63

64
use super::miner::MinerReason;
65
use super::{
66
    Config, Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE,
67
};
68
use crate::burnchains::BurnchainController;
69
use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective};
70
use crate::neon_node::{
71
    fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState,
72
};
73
use crate::run_loop::nakamoto::{Globals, RunLoop};
74
use crate::run_loop::RegisteredKey;
75
use crate::BitcoinRegtestController;
76

77
#[cfg(test)]
/// Mutex to stall the relayer thread right before it creates a miner thread.
/// Only compiled into test builds; toggled by integration tests via `TestFlag`.
pub static TEST_MINER_THREAD_STALL: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default);

#[cfg(test)]
/// Mutex to stall the miner thread right after it starts up (does not block the relayer thread).
/// Only compiled into test builds.
pub static TEST_MINER_THREAD_START_STALL: LazyLock<TestFlag<bool>> =
    LazyLock::new(TestFlag::default);

#[cfg(test)]
/// Test flag to set the tip for the miner to commit to.
/// `None` (the default) means no override; only compiled into test builds.
pub static TEST_MINER_COMMIT_TIP: LazyLock<TestFlag<Option<(ConsensusHash, BlockHeaderHash)>>> =
    LazyLock::new(TestFlag::default);
90

91
/// Command types for the Nakamoto relayer thread, issued to it by other threads
#[allow(clippy::large_enum_variant)]
pub enum RelayerDirective {
    /// Handle some new data that arrived on the network (such as blocks, transactions, and
    /// microblocks)
    HandleNetResult(NetworkResult),
    /// A new burn block has been processed by the SortitionDB, check if this miner won sortition,
    ///  and if so, start the miner thread
    ProcessedBurnBlock(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash),
    /// Either a new burn block has been processed (without a miner active yet) or a
    ///  nakamoto tenure's first block has been processed, so the relayer should issue
    ///  a block commit
    IssueBlockCommit(ConsensusHash, BlockHeaderHash),
    /// Try to register a VRF public key
    RegisterKey(BlockSnapshot),
    /// Stop the relayer thread
    Exit,
}
108

109
impl fmt::Display for RelayerDirective {
110
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
×
111
        match self {
×
112
            RelayerDirective::HandleNetResult(_) => write!(f, "HandleNetResult"),
×
113
            RelayerDirective::ProcessedBurnBlock(_, _, _) => write!(f, "ProcessedBurnBlock"),
×
114
            RelayerDirective::IssueBlockCommit(_, _) => write!(f, "IssueBlockCommit"),
×
115
            RelayerDirective::RegisterKey(_) => write!(f, "RegisterKey"),
×
116
            RelayerDirective::Exit => write!(f, "Exit"),
×
117
        }
118
    }
×
119
}
120

121
/// Last commitment data
/// This represents the tenure that the last-sent block-commit committed to.
pub struct LastCommit {
    /// block-commit sent
    block_commit: LeaderBlockCommitOp,
    /// the sortition tip at the time the block-commit was sent
    burn_tip: BlockSnapshot,
    /// the stacks tip at the time the block-commit was sent
    stacks_tip: StacksBlockId,
    /// the tenure consensus hash for the tip's tenure
    tenure_consensus_hash: ConsensusHash,
    /// the start-block hash of the tip's tenure
    #[allow(dead_code)]
    start_block_hash: BlockHeaderHash,
    /// What is the epoch in which this was sent?
    epoch_id: StacksEpochId,
    /// commit txid (None until filled in via `set_txid()` on submission)
    txid: Option<Txid>,
}
140

141
/// Timer used to check whether or not a burnchain view change has
///  waited long enough to issue a burn commit without a tenure change
enum BurnBlockCommitTimer {
    /// The timer hasn't been set: we aren't currently waiting to submit a commit
    NotSet,
    /// The timer is set, and has been set for a particular burn view
    Set {
        /// When the timer was started
        start_time: Instant,
        /// This is the canonical sortition at the time that the
        ///  timer began. This is used to make sure we aren't reusing
        ///  the timeout between sortitions
        burn_tip: ConsensusHash,
    },
}
155

156
impl BurnBlockCommitTimer {
157
    /// Check if the timer has expired (and was set).
158
    /// If the timer was not set, then set it.
159
    ///
160
    /// Returns true if the timer expired
161
    fn is_ready(&mut self, current_burn_tip: &ConsensusHash, timeout: &Duration) -> bool {
45,244✔
162
        let needs_reset = match self {
45,244✔
163
            BurnBlockCommitTimer::NotSet => true,
221✔
164
            BurnBlockCommitTimer::Set {
165
                start_time,
45,023✔
166
                burn_tip,
45,023✔
167
            } => {
168
                if burn_tip != current_burn_tip {
45,023✔
169
                    true
985✔
170
                } else {
171
                    if start_time.elapsed() > *timeout {
44,038✔
172
                        // timer expired and was pointed at the correct burn tip
173
                        // so we can just return is_ready here
174
                        return true;
10,488✔
175
                    }
33,550✔
176
                    // timer didn't expire, but the burn tip was correct, so
177
                    //  we don't need to reset the timer
178
                    false
33,550✔
179
                }
180
            }
181
        };
182
        if needs_reset {
34,756✔
183
            info!(
1,206✔
184
                "Starting new tenure timeout";
185
                "timeout_secs" => timeout.as_secs(),
1,206✔
186
                "burn_tip_ch" => %current_burn_tip
187
            );
188
            *self = Self::Set {
1,206✔
189
                burn_tip: current_burn_tip.clone(),
1,206✔
190
                start_time: Instant::now(),
1,206✔
191
            };
1,206✔
192
        }
33,550✔
193

194
        debug!(
34,756✔
195
            "Waiting for tenure timeout before issuing commit";
196
            "elapsed_secs" => self.elapsed_secs(),
×
197
            "burn_tip_ch" => %current_burn_tip
198
        );
199

200
        false
34,756✔
201
    }
45,244✔
202

203
    /// At what time, if set, would this timer be ready?
204
    fn deadline(&self, timeout: &Duration) -> Option<Instant> {
34,752✔
205
        match self {
34,752✔
206
            BurnBlockCommitTimer::NotSet => None,
×
207
            BurnBlockCommitTimer::Set { start_time, .. } => Some(*start_time + *timeout),
34,752✔
208
        }
209
    }
34,752✔
210

211
    /// How much time has elapsed on the current timer?
212
    fn elapsed_secs(&self) -> u64 {
1✔
213
        match self {
1✔
214
            BurnBlockCommitTimer::NotSet => 0,
1✔
215
            BurnBlockCommitTimer::Set { start_time, .. } => start_time.elapsed().as_secs(),
×
216
        }
217
    }
1✔
218
}
219

220
impl LastCommit {
221
    pub fn new(
2,723✔
222
        commit: LeaderBlockCommitOp,
2,723✔
223
        burn_tip: BlockSnapshot,
2,723✔
224
        stacks_tip: StacksBlockId,
2,723✔
225
        tenure_consensus_hash: ConsensusHash,
2,723✔
226
        start_block_hash: BlockHeaderHash,
2,723✔
227
        epoch_id: StacksEpochId,
2,723✔
228
    ) -> Self {
2,723✔
229
        Self {
2,723✔
230
            block_commit: commit,
2,723✔
231
            burn_tip,
2,723✔
232
            stacks_tip,
2,723✔
233
            tenure_consensus_hash,
2,723✔
234
            start_block_hash,
2,723✔
235
            epoch_id,
2,723✔
236
            txid: None,
2,723✔
237
        }
2,723✔
238
    }
2,723✔
239

240
    /// Get the commit
241
    pub fn get_block_commit(&self) -> &LeaderBlockCommitOp {
2,723✔
242
        &self.block_commit
2,723✔
243
    }
2,723✔
244

245
    /// What's the parent tenure's tenure-start block hash?
246
    pub fn parent_tenure_id(&self) -> StacksBlockId {
×
247
        StacksBlockId(self.block_commit.block_header_hash.0)
×
248
    }
×
249

250
    /// What's the stacks tip at the time of commit?
251
    pub fn get_stacks_tip(&self) -> &StacksBlockId {
×
252
        &self.stacks_tip
×
253
    }
×
254

255
    /// What's the burn tip at the time of commit?
256
    pub fn get_burn_tip(&self) -> &BlockSnapshot {
216,624✔
257
        &self.burn_tip
216,624✔
258
    }
216,624✔
259

260
    /// What's the epoch in which this was sent?
261
    pub fn get_epoch_id(&self) -> &StacksEpochId {
2,723✔
262
        &self.epoch_id
2,723✔
263
    }
2,723✔
264

265
    /// Get the tenure ID of the tenure this commit builds on
266
    pub fn get_tenure_id(&self) -> &ConsensusHash {
261,942✔
267
        &self.tenure_consensus_hash
261,942✔
268
    }
261,942✔
269

270
    /// Set our txid
271
    pub fn set_txid(&mut self, txid: &Txid) {
1,647✔
272
        self.txid = Some(txid.clone());
1,647✔
273
    }
1,647✔
274
}
275

276
/// Join handle type for the miner thread; joining yields the miner's exit status.
pub type MinerThreadJoinHandle = JoinHandle<Result<(), NakamotoNodeError>>;

/// Miner thread join handle, as well as an "abort" flag to force the miner thread to exit when it
/// is blocked.
pub struct MinerStopHandle {
    /// The join handle itself
    join_handle: MinerThreadJoinHandle,
    /// The relayer-set abort flag (shared with the miner thread)
    abort_flag: Arc<AtomicBool>,
}
286

287
impl MinerStopHandle {
288
    pub fn new(join_handle: MinerThreadJoinHandle, abort_flag: Arc<AtomicBool>) -> Self {
1,525✔
289
        Self {
1,525✔
290
            join_handle,
1,525✔
291
            abort_flag,
1,525✔
292
        }
1,525✔
293
    }
1,525✔
294

295
    /// Get a ref to the inner thread object
296
    pub fn inner_thread(&self) -> &std::thread::Thread {
1,508✔
297
        self.join_handle.thread()
1,508✔
298
    }
1,508✔
299

300
    /// Destroy this stop handle to get the thread join handle
301
    pub fn into_inner(self) -> MinerThreadJoinHandle {
1,299✔
302
        self.join_handle
1,299✔
303
    }
1,299✔
304

305
    /// Stop the inner miner thread.
306
    /// Blocks the miner, and sets the abort flag so that a blocked miner will error out.
307
    pub fn stop(self, globals: &Globals) -> Result<(), NakamotoNodeError> {
1,299✔
308
        let my_id = thread::current().id();
1,299✔
309
        let prior_thread_id = self.inner_thread().id();
1,299✔
310
        debug!(
1,299✔
311
            "[Thread {:?}]: Stopping prior miner thread ID {:?}",
312
            &my_id, &prior_thread_id
×
313
        );
314

315
        self.abort_flag.store(true, Ordering::SeqCst);
1,299✔
316
        globals.block_miner();
1,299✔
317

318
        let prior_miner = self.into_inner();
1,299✔
319
        let prior_miner_result = prior_miner.join().map_err(|_| {
1,299✔
320
            error!("Miner: failed to join prior miner");
×
321
            ChainstateError::MinerAborted
×
322
        })?;
×
323
        debug!("Stopped prior miner thread ID {:?}", &prior_thread_id);
1,299✔
324
        if let Err(e) = prior_miner_result {
1,299✔
325
            // it's okay if the prior miner thread exited with an error.
326
            // in many cases this is expected (i.e., a burnchain block occurred)
327
            // if some error condition should be handled though, this is the place
328
            //  to do that handling.
329
            debug!("Prior mining thread exited with: {e:?}");
1,112✔
330
        }
187✔
331

332
        globals.unblock_miner();
1,299✔
333
        Ok(())
1,299✔
334
    }
1,299✔
335
}
336

337
/// The reason for issuing a tenure extend
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum TenureExtendReason {
    /// There was an empty sortition (no winner)
    EmptySortition,
    /// There was a bad sortition winner
    BadSortitionWinner,
    /// We are waiting for the current winner to produce a block.
    UnresponsiveWinner,
}
347

348
/// Information necessary to determine when to extend a tenure
#[derive(Clone)]
pub struct TenureExtendTime {
    /// The time at which we determined that we should tenure-extend
    time: Instant,
    /// The amount of time we should wait before tenure-extending
    timeout: Duration,
    /// The reason for tenure-extending
    reason: TenureExtendReason,
}
358

359
impl TenureExtendTime {
360
    /// Create a new `TenureExtendTime` for an UnresponsiveWinner with the specified `timeout`
361
    pub fn unresponsive_winner(timeout: Duration) -> Self {
53✔
362
        Self {
53✔
363
            time: Instant::now(),
53✔
364
            timeout,
53✔
365
            reason: TenureExtendReason::UnresponsiveWinner,
53✔
366
        }
53✔
367
    }
53✔
368

369
    /// Create a new `TenureExtendTime` with the provided `reason` and no `timeout`
370
    pub fn immediate(reason: TenureExtendReason) -> Self {
53✔
371
        Self {
53✔
372
            time: Instant::now(),
53✔
373
            timeout: Duration::from_millis(0),
53✔
374
            reason,
53✔
375
        }
53✔
376
    }
53✔
377

378
    /// Should we attempt to tenure-extend?
379
    pub fn should_extend(&self) -> bool {
58,438✔
380
        // We set the time, but have we waited long enough?
381
        self.time.elapsed() > self.timeout
58,438✔
382
    }
58,438✔
383

384
    // Amount of time elapsed since we decided to tenure-extend
385
    pub fn elapsed(&self) -> Duration {
×
386
        self.time.elapsed()
×
387
    }
×
388

389
    // The timeout specified when we decided to tenure-extend
390
    pub fn timeout(&self) -> Duration {
×
391
        self.timeout
×
392
    }
×
393

394
    /// The reason for tenure-extending
395
    pub fn reason(&self) -> &TenureExtendReason {
275✔
396
        &self.reason
275✔
397
    }
275✔
398

399
    /// Update the timeout for this `TenureExtendTime` and reset the time
400
    pub fn refresh(&mut self, timeout: Duration) {
275✔
401
        self.timeout = timeout;
275✔
402
        self.time = Instant::now();
275✔
403
    }
275✔
404
}
405

406
/// Relayer thread
/// * accepts network results and stores blocks and microblocks
/// * forwards new blocks, microblocks, and transactions to the p2p thread
/// * issues (and re-issues) block commits to participate as a miner
/// * processes burnchain state to determine if selected as a miner
/// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread)
pub struct RelayerThread {
    /// Node config
    pub(crate) config: Config,
    /// Handle to the sortition DB
    sortdb: SortitionDB,
    /// Handle to the chainstate DB
    chainstate: StacksChainState,
    /// Handle to the mempool DB
    mempool: MemPoolDB,
    /// Handle to global state and inter-thread communication channels
    pub(crate) globals: Globals,
    /// Authoritative copy of the keychain state
    pub(crate) keychain: Keychain,
    /// Burnchain configuration
    pub(crate) burnchain: Burnchain,
    /// height of last VRF key registration request
    last_vrf_key_burn_height: Option<u64>,
    /// client to the burnchain (used only for sending block-commits)
    pub(crate) bitcoin_controller: BitcoinRegtestController,
    /// client to the event dispatcher
    pub(crate) event_dispatcher: EventDispatcher,
    /// copy of the local peer state
    local_peer: LocalPeer,
    /// last observed burnchain block height from the p2p thread (obtained from network results)
    last_network_block_height: u64,
    /// time at which we observed a change in the network block height (epoch time in millis)
    last_network_block_height_ts: u128,
    /// last observed number of downloader state-machine passes from the p2p thread (obtained from
    /// network results)
    last_network_download_passes: u64,
    /// last observed number of inventory state-machine passes from the p2p thread (obtained from
    /// network results)
    last_network_inv_passes: u64,
    /// minimum number of downloader state-machine passes that must take place before mining (this
    /// is used to ensure that the p2p thread attempts to download new Stacks block data before
    /// this thread tries to mine a block)
    min_network_download_passes: u64,
    /// minimum number of inventory state-machine passes that must take place before mining (this
    /// is used to ensure that the p2p thread attempts to download new Stacks block data before
    /// this thread tries to mine a block)
    min_network_inv_passes: u64,

    /// Inner relayer instance for forwarding broadcasted data back to the p2p thread for dispatch
    /// to neighbors
    relayer: Relayer,

    /// handle to the subordinate miner thread
    miner_thread: Option<MinerStopHandle>,
    /// miner thread's burn view
    miner_thread_burn_view: Option<BlockSnapshot>,

    /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up
    ///  to check if it should issue a block commit or try to register a VRF key
    next_initiative: Instant,
    /// whether or not this node is configured as a miner
    is_miner: bool,
    /// Information about the last-sent block commit, and the relayer's view of the chain at the
    /// time it was sent.
    last_committed: Option<LastCommit>,
    /// Timeout for waiting for the first block in a tenure before submitting a block commit
    new_tenure_timeout: BurnBlockCommitTimer,
    /// Time to wait before attempting a tenure extend
    tenure_extend_time: Option<TenureExtendTime>,
}
475

476
impl RelayerThread {
477
    /// Instantiate relayer thread.
478
    /// Uses `runloop` to obtain globals, config, and `is_miner`` status
479
    pub fn new(
239✔
480
        runloop: &RunLoop,
239✔
481
        local_peer: LocalPeer,
239✔
482
        relayer: Relayer,
239✔
483
        keychain: Keychain,
239✔
484
    ) -> RelayerThread {
239✔
485
        let config = runloop.config().clone();
239✔
486
        let globals = runloop.get_globals();
239✔
487
        let burn_db_path = config.get_burn_db_file_path();
239✔
488
        let is_miner = runloop.is_miner();
239✔
489

490
        let sortdb = SortitionDB::open(
239✔
491
            &burn_db_path,
239✔
492
            true,
493
            runloop.get_burnchain().pox_constants,
239✔
494
            Some(config.node.get_marf_opts()),
239✔
495
        )
496
        .expect("FATAL: failed to open burnchain DB");
239✔
497

498
        let chainstate =
239✔
499
            open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB");
239✔
500

501
        let mempool = config
239✔
502
            .connect_mempool_db()
239✔
503
            .expect("Database failure opening mempool");
239✔
504

505
        let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone());
239✔
506

507
        let next_initiative_delay = config.node.next_initiative_delay;
239✔
508

509
        RelayerThread {
239✔
510
            config,
239✔
511
            sortdb,
239✔
512
            chainstate,
239✔
513
            mempool,
239✔
514
            globals,
239✔
515
            keychain,
239✔
516
            burnchain: runloop.get_burnchain(),
239✔
517
            last_vrf_key_burn_height: None,
239✔
518
            bitcoin_controller,
239✔
519
            event_dispatcher: runloop.get_event_dispatcher(),
239✔
520
            local_peer,
239✔
521

239✔
522
            last_network_block_height: 0,
239✔
523
            last_network_block_height_ts: 0,
239✔
524
            last_network_download_passes: 0,
239✔
525
            min_network_download_passes: 0,
239✔
526
            last_network_inv_passes: 0,
239✔
527
            min_network_inv_passes: 0,
239✔
528

239✔
529
            relayer,
239✔
530

239✔
531
            miner_thread: None,
239✔
532
            miner_thread_burn_view: None,
239✔
533
            is_miner,
239✔
534
            next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay),
239✔
535
            last_committed: None,
239✔
536
            new_tenure_timeout: BurnBlockCommitTimer::NotSet,
239✔
537
            tenure_extend_time: None,
239✔
538
        }
239✔
539
    }
239✔
540

541
    /// Get a handle to the p2p thread.
    /// Pure delegation to the inner `Relayer`; used by other threads/components to
    /// communicate with the p2p state machine.
    pub fn get_p2p_handle(&self) -> NetworkHandle {
        self.relayer.get_p2p_handle()
    }
1,316✔
545

546
    /// have we waited for the right conditions under which to start mining a block off of our
547
    /// chain tip?
548
    fn has_waited_for_latest_blocks(&self) -> bool {
310,542✔
549
        // a network download pass took place
550
        self.min_network_download_passes <= self.last_network_download_passes
310,542✔
551
        // we waited long enough for a download pass, but timed out waiting
552
        || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms()
286,439✔
553
        // we're not supposed to wait at all
554
        || !self.config.miner.wait_for_block_download
10,213✔
555
    }
310,542✔
556

557
    /// Handle a NetworkResult from the p2p/http state machine.  Usually this is the act of
    /// * preprocessing and storing new blocks and microblocks
    /// * relaying blocks, microblocks, and transactions
    /// * updating unconfirmed state views
    ///
    /// Also tracks the p2p thread's burnchain height and download/inv-sync pass
    /// counters, blocking mining when the burnchain advances and unblocking it once
    /// a download pass has completed (or the wait times out).
    pub fn process_network_result(&mut self, mut net_result: NetworkResult) {
        debug!(
            "Relayer: Handle network result (from {})",
            net_result.burn_height
        );

        if self.last_network_block_height != net_result.burn_height {
            // burnchain advanced; disable mining until we also do a download pass.
            self.last_network_block_height = net_result.burn_height;
            self.min_network_download_passes = net_result.num_download_passes + 1;
            self.min_network_inv_passes = net_result.num_inv_sync_passes + 1;
            self.last_network_block_height_ts = get_epoch_time_ms();
        }

        // store/relay the new data and collect receipts describing what arrived
        let net_receipts = self
            .relayer
            .process_network_result(
                &self.local_peer,
                &mut net_result,
                &self.burnchain,
                &mut self.sortdb,
                &mut self.chainstate,
                &mut self.mempool,
                self.globals.sync_comms.get_ibd(),
                Some(&self.globals.coord_comms),
                Some(&self.event_dispatcher),
            )
            .expect("BUG: failure processing network results");

        if net_receipts.num_new_blocks > 0 {
            // if we received any new block data that could invalidate our view of the chain tip,
            // then stop mining until we process it
            debug!("Relayer: block mining to process newly-arrived blocks or microblocks");
            signal_mining_blocked(self.globals.get_miner_status());
        }

        // announce any newly-arrived mempool transactions to event observers
        let mempool_txs_added = net_receipts.mempool_txs_added.len();
        if mempool_txs_added > 0 {
            self.event_dispatcher
                .process_new_mempool_txs(net_receipts.mempool_txs_added);
        }

        // Dispatch retrieved attachments, if any.
        if net_result.has_attachments() {
            self.event_dispatcher
                .process_new_attachments(&net_result.attachments);
        }

        // resume mining if we blocked it, and if we've done the requisite download
        // passes
        self.last_network_download_passes = net_result.num_download_passes;
        self.last_network_inv_passes = net_result.num_inv_sync_passes;
        if self.has_waited_for_latest_blocks() {
            debug!("Relayer: did a download pass, so unblocking mining");
            signal_mining_ready(self.globals.get_miner_status());
        }
    }
310,543✔
618

619
    /// Choose a miner directive for a sortition with a winner.
620
    ///
621
    /// The decision process is a little tricky, because the right decision depends on:
622
    /// * whether or not we won the _given_ sortition (`sn`)
623
    /// * whether or not we won the sortition that started the ongoing Stacks tenure
624
    /// * whether or not the ongoing Stacks tenure is at or descended from the last-winning
625
    /// sortition
626
    ///
627
    /// Specifically:
628
    ///
629
    /// If we won the given sortition `sn`, then we can start mining immediately with a `BlockFound`
630
    /// tenure-change. The exception is if we won the sortition, but the sortition's winning commit
631
    /// does not commit to the ongoing tenure. In this case, we instead extend the current tenure.
632
    ///
633
    /// Otherwise, if we did not win `sn`, if we won the tenure which started the ongoing Stacks tenure
634
    /// (i.e. we're the active miner), then we _may_ start mining after a timeout _if_ the winning
635
    /// miner (not us) fails to submit a `BlockFound` tenure-change block for `sn`.
636
    fn choose_directive_sortition_with_winner(
1,323✔
637
        &mut self,
1,323✔
638
        sn: BlockSnapshot,
1,323✔
639
        mining_pkh: &Hash160,
1,323✔
640
        committed_index_hash: StacksBlockId,
1,323✔
641
    ) -> MinerDirective {
1,323✔
642
        let won_sortition = sn.miner_pk_hash.as_ref() == Some(mining_pkh);
1,323✔
643

644
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
1,323✔
645
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
1,323✔
646
                .expect("FATAL: failed to query sortition DB for stacks tip");
1,323✔
647
        let canonical_stacks_snapshot =
1,323✔
648
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
1,323✔
649
                .expect("FATAL: failed to query sortiiton DB for epoch")
1,323✔
650
                .expect("FATAL: no sortition for canonical stacks tip");
1,323✔
651

652
        // If we won the sortition, ensure that the sortition's winning commit actually commits to
653
        // the ongoing tenure. If it does not (i.e. commit is "stale" and points to N-1 when we are
654
        // currently in N), and if we are also the ongoing tenure's miner, then we must not attempt
655
        // a tenure change (which would reorg our own signed blocks). Instead, we should immediately
656
        // extend the tenure.
657
        if won_sortition && !self.config.get_node_config(false).mock_mining {
1,323✔
658
            let canonical_stacks_tip =
1,140✔
659
                StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
1,140✔
660

661
            let commits_to_tip_tenure = Self::sortition_commits_to_stacks_tip_tenure(
1,140✔
662
                &mut self.chainstate,
1,140✔
663
                &canonical_stacks_tip,
1,140✔
664
                &canonical_stacks_snapshot,
1,140✔
665
                &sn,
1,140✔
666
            ).unwrap_or_else(|e| {
1,140✔
667
                warn!(
×
668
                    "Relayer: Failed to determine if winning sortition commits to current tenure: {e:?}";
669
                    "sortition_ch" => %sn.consensus_hash,
670
                    "stacks_tip_ch" => %canonical_stacks_tip_ch
671
                );
672
                false
×
673
            });
×
674

675
            if !commits_to_tip_tenure {
1,140✔
676
                let won_ongoing_tenure_sortition =
14✔
677
                    canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh);
14✔
678

679
                if won_ongoing_tenure_sortition {
14✔
680
                    info!(
4✔
681
                        "Relayer: Won sortition, but commit does not target ongoing tenure. Will extend instead of starting a new tenure.";
682
                        "winning_sortition" => %sn.consensus_hash,
683
                        "ongoing_tenure" => %canonical_stacks_snapshot.consensus_hash,
684
                        "commits_to_tip_tenure?" => commits_to_tip_tenure
4✔
685
                    );
686
                    // Extend tenure to the new burn view instead of attempting BlockFound
687
                    return MinerDirective::ContinueTenure {
4✔
688
                        new_burn_view: sn.consensus_hash,
4✔
689
                    };
4✔
690
                }
10✔
691
            }
1,126✔
692
        }
183✔
693

694
        if won_sortition || self.config.get_node_config(false).mock_mining {
1,319✔
695
            // a sortition happenend, and we won
696
            info!("Won sortition; begin tenure.";
1,143✔
697
                    "winning_sortition" => %sn.consensus_hash);
698
            return MinerDirective::BeginTenure {
1,143✔
699
                parent_tenure_start: committed_index_hash,
1,143✔
700
                burnchain_tip: sn.clone(),
1,143✔
701
                election_block: sn,
1,143✔
702
                late: false,
1,143✔
703
            };
1,143✔
704
        }
176✔
705

706
        // a sortition happened, but we didn't win. Check if we won the ongoing tenure.
707
        debug!(
176✔
708
            "Relayer: did not win sortition {}, so stopping tenure",
709
            &sn.sortition
×
710
        );
711

712
        let won_ongoing_tenure_sortition =
176✔
713
            canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh);
176✔
714
        if won_ongoing_tenure_sortition {
176✔
715
            // we won the current ongoing tenure, but not the most recent sortition. Should we attempt to extend immediately or wait for the incoming miner?
716
            if let Ok(has_higher) = Self::has_higher_sortition_commits_to_stacks_tip_tenure(
61✔
717
                &self.sortdb,
61✔
718
                &mut self.chainstate,
61✔
719
                &sn,
61✔
720
                &canonical_stacks_snapshot,
61✔
721
            ) {
61✔
722
                if has_higher {
61✔
723
                    debug!("Relayer: Did not win current sortition but won the prior valid sortition. Will attempt to extend tenure after allowing the new miner some time to come online.";
50✔
724
                            "tenure_extend_wait_timeout_ms" => self.config.miner.tenure_extend_wait_timeout.as_millis(),
×
725
                    );
726
                    self.tenure_extend_time = Some(TenureExtendTime::unresponsive_winner(
50✔
727
                        self.config.miner.tenure_extend_wait_timeout,
50✔
728
                    ));
50✔
729
                } else {
730
                    info!("Relayer: no valid sortition since our last winning sortition. Will extend tenure.");
11✔
731
                    self.tenure_extend_time = Some(TenureExtendTime::immediate(
11✔
732
                        TenureExtendReason::BadSortitionWinner,
11✔
733
                    ));
11✔
734
                }
735
            }
×
736
        }
115✔
737
        MinerDirective::StopTenure
176✔
738
    }
1,323✔
739

740
    /// Choose a miner directive for a sortition with no winner.
    ///
    /// The decision process is a little tricky, because the right decision depends on:
    /// * whether or not we won the sortition that started the ongoing Stacks tenure
    /// * whether or not we won the last sortition with a winner
    /// * whether or not the last sortition winner has produced a Stacks block
    /// * whether or not the ongoing Stacks tenure is at or descended from the last-winning
    /// sortition
    ///
    /// Find out who won the last sortition with a winner.  If it was us, and if we haven't yet
    /// submitted a `BlockFound` tenure-change for it (which can happen if this given sortition is
    /// from a flash block), then start mining immediately with a "late" `BlockFound` tenure, _and_
    /// prepare to start mining right afterwards with an `Extended` tenure-change so as to represent
    /// the given sortition `sn`'s burn view in the Stacks chain.
    ///
    /// Otherwise, if we did not win the last-winning sortition, then check to see if we're the ongoing
    /// Stacks tenure's miner. If so, then we _may_ start mining after a timeout _if_ the winner of
    /// the last-good sortition (not us) fails to submit a `BlockFound` tenure-change block.
    /// This can happen if `sn` was a flash block, and the remote miner has yet to process it.
    ///
    /// We won't always be able to mine -- for example, this could be an empty sortition, but the
    /// parent block could be an epoch 2 block.  In this case, the right thing to do is to wait for
    /// the next block-commit.
    ///
    /// * `sn` - snapshot of the winner-less sortition being processed
    /// * `mining_pk` - hash of this node's mining public key, used to recognize our own wins
    ///
    /// Returns `Some(directive)` if the miner should act, or `None` to take no action.
    fn choose_directive_sortition_without_winner(
        &mut self,
        sn: BlockSnapshot,
        mining_pk: &Hash160,
    ) -> Option<MinerDirective> {
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
                .expect("FATAL: failed to query sortition DB for stacks tip");
        let canonical_stacks_snapshot =
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
                .expect("FATAL: failed to query sortiiton DB for epoch")
                .expect("FATAL: no sortition for canonical stacks tip");

        // find out what epoch the Stacks tip is in.
        // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so
        // right now since this sortition has no winner.
        let cur_epoch = SortitionDB::get_stacks_epoch(
            self.sortdb.conn(),
            canonical_stacks_snapshot.block_height,
        )
        .expect("FATAL: failed to query sortition DB for epoch")
        .expect("FATAL: no epoch defined for existing sortition");

        if cur_epoch.epoch_id < StacksEpochId::Epoch30 {
            debug!(
                "As of sortition {}, there has not yet been a Nakamoto tip. Cannot mine.",
                &canonical_stacks_snapshot.consensus_hash
            );
            return None;
        }

        // find out who won the last non-empty sortition. It may have been us.
        let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &sn)
            .inspect_err(|e| {
                warn!("Relayer: Failed to load last winning snapshot: {e:?}");
            })
        else {
            // this should be unreachable, but don't tempt fate.
            info!("Relayer: No prior snapshots have a winning sortition. Will not try to mine.");
            return None;
        };

        // Check if we won the last winning snapshot AND it commits to the ongoing tenure.
        let won_last_winning_snapshot =
            last_winning_snapshot.miner_pk_hash.as_ref() == Some(mining_pk);
        let canonical_stacks_tip =
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
        let commits_to_tip_tenure = Self::sortition_commits_to_stacks_tip_tenure(
            &mut self.chainstate,
            &canonical_stacks_tip,
            &canonical_stacks_snapshot,
            &last_winning_snapshot,
        ).unwrap_or_else(|e| {
            warn!(
                "Relayer: Failed to determine if last winning sortition commits to current tenure: {e:?}";
                "sortition_ch" => %sn.consensus_hash,
                "stacks_tip_ch" => %canonical_stacks_tip_ch
            );
            // on error, conservatively treat the last winner as not committing to our tenure
            false
        });

        if (won_last_winning_snapshot && commits_to_tip_tenure)
            || self.config.get_node_config(false).mock_mining
        {
            debug!(
                "Relayer: we won the last winning sortition {}",
                &last_winning_snapshot.consensus_hash
            );

            if Self::need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) {
                info!(
                    "Relayer: will submit late BlockFound for {}",
                    &last_winning_snapshot.consensus_hash
                );
                // prepare to immediately extend after our BlockFound gets mined.
                self.tenure_extend_time = Some(TenureExtendTime::immediate(
                    TenureExtendReason::EmptySortition,
                ));
                return Some(MinerDirective::BeginTenure {
                    parent_tenure_start: StacksBlockId(
                        last_winning_snapshot.winning_stacks_block_hash.clone().0,
                    ),
                    burnchain_tip: sn,
                    election_block: last_winning_snapshot,
                    late: true,
                });
            }
            let tip_is_last_winning_snapshot = canonical_stacks_snapshot.block_height
                == last_winning_snapshot.block_height
                && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash;

            if tip_is_last_winning_snapshot {
                // this is the ongoing tenure snapshot. A BlockFound has already been issued. We
                // can instead opt to Extend immediately
                info!("Relayer: BlockFound already issued for the last winning sortition. Will extend tenure.");
                return Some(MinerDirective::ContinueTenure {
                    new_burn_view: sn.consensus_hash,
                });
            }
        }

        let won_ongoing_tenure_sortition =
            canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pk);
        if won_ongoing_tenure_sortition {
            info!("Relayer: No sortition, but we produced the canonical Stacks tip. Will extend tenure.");
            if !won_last_winning_snapshot {
                // delay trying to continue since the last snapshot with a sortition was won
                // by someone else -- there's a chance that this other miner will produce a
                // BlockFound in the interim.
                debug!("Relayer: Did not win last winning snapshot despite mining the ongoing tenure. Will attempt to extend tenure after allowing the new miner some time to produce a block.");
                self.tenure_extend_time = Some(TenureExtendTime::unresponsive_winner(
                    self.config.miner.tenure_extend_wait_timeout,
                ));
                return None;
            }
            return Some(MinerDirective::ContinueTenure {
                new_burn_view: sn.consensus_hash,
            });
        }

        info!("Relayer: No sortition, and we did not produce the last Stacks tip. Will not mine.");
        return None;
    }
143✔
886

887
    /// Determine if we the current tenure winner needs to issue a BlockFound.
888
    /// Assumes the caller has already checked that the last-winning snapshot was won by us.
889
    ///
890
    /// Returns true if the stacks tip's snapshot is an ancestor of the last-won sortition
891
    /// Returns false otherwise.
892
    fn need_block_found(
303✔
893
        canonical_stacks_snapshot: &BlockSnapshot,
303✔
894
        last_winning_snapshot: &BlockSnapshot,
303✔
895
    ) -> bool {
303✔
896
        // we won the last non-empty sortition. Has there been a BlockFound issued for it?
897
        // This would be true if the stacks tip's tenure is at or descends from this snapshot.
898
        // If there has _not_ been a BlockFound, then we should issue one.
899
        if canonical_stacks_snapshot.block_height > last_winning_snapshot.block_height {
303✔
900
            // stacks tip is ahead of this snapshot, so no BlockFound can be issued.
901
            test_debug!(
1✔
902
                "Stacks_tip_sn.block_height ({}) > last_winning_snapshot.block_height ({})",
903
                canonical_stacks_snapshot.block_height,
904
                last_winning_snapshot.block_height
905
            );
906
            false
1✔
907
        } else if canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height
302✔
908
            && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash
19✔
909
        {
910
            // this is the ongoing tenure snapshot. A BlockFound has already been issued.
911
            test_debug!(
17✔
912
                "Ongoing tenure {} already represents last-winning snapshot",
913
                &canonical_stacks_snapshot.consensus_hash
×
914
            );
915
            false
17✔
916
        } else {
917
            // The stacks tip is behind the last-won sortition, so a BlockFound is still needed.
918
            true
285✔
919
        }
920
    }
303✔
921

922
    /// Given the pointer to a recently processed sortition, see if we won the sortition, and
    /// determine what miner action (if any) to take.
    ///
    /// * `consensus_hash` - consensus hash of the newly-processed sortition
    /// * `burn_hash` - burnchain header hash of that sortition (logging only)
    /// * `committed_index_hash` - Stacks block ID targeted by the winning block-commit
    ///
    /// Returns a directive to the relayer thread to either start, stop, or continue a tenure, if
    /// this sortition matches the sortition tip and we have a parent to build atop.
    ///
    /// Otherwise, returns None, meaning no action will be taken.
    // This method is covered by the e2e bitcoind tests, which do not show up
    //  in mutant coverage.
    #[cfg_attr(test, mutants::skip)]
    fn process_sortition(
        &mut self,
        consensus_hash: ConsensusHash,
        burn_hash: BurnchainHeaderHash,
        committed_index_hash: StacksBlockId,
    ) -> Option<MinerDirective> {
        let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash)
            .expect("FATAL: failed to query sortition DB")
            .expect("FATAL: unknown consensus hash");

        // did our mining key win this sortition? Only true when both the winner's
        // key hash and our own key hash are known.
        let was_winning_pkh = if let (Some(winning_pkh), Some(my_pkh)) = (
            sn.miner_pk_hash.as_ref(),
            self.get_mining_key_pkh().as_ref(),
        ) {
            winning_pkh == my_pkh
        } else {
            false
        };

        let won_sortition = sn.sortition && was_winning_pkh;
        if won_sortition {
            increment_stx_blocks_mined_counter();
        }
        self.globals.set_last_sortition(sn.clone());
        self.globals.counters.bump_blocks_processed();
        self.globals.counters.bump_sortitions_processed();

        // there may be a buffered stacks block to process, so wake up the coordinator to check
        self.globals.coord_comms.announce_new_stacks_block();

        info!(
            "Relayer: Process sortition";
            "sortition_ch" => %consensus_hash,
            "burn_hash" => %burn_hash,
            "burn_height" => sn.block_height,
            "winning_txid" => %sn.winning_block_txid,
            "committed_parent" => %committed_index_hash,
            "won_sortition?" => won_sortition,
        );

        let cur_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
            .expect("FATAL: failed to query sortition DB");

        // only act on the canonical sortition tip; a stale sortition just pokes the
        // main loop so it can catch up.
        if cur_sn.consensus_hash != consensus_hash {
            info!("Relayer: Current sortition {} is ahead of processed sortition {consensus_hash}; taking no action", &cur_sn.consensus_hash);
            self.globals
                .raise_initiative("process_sortition".to_string());
            return None;
        }

        // Reset the tenure extend time
        self.tenure_extend_time = None;
        let Some(mining_pk) = self.get_mining_key_pkh() else {
            debug!("No mining key, will not mine");
            return None;
        };

        // Nakamoto-style mining only applies from epoch 3.0 onward.
        let epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), sn.block_height)
            .expect("FATAL: epoch not found for current snapshot")
            .expect("FATAL: epoch not found for current snapshot");
        if !epoch.epoch_id.uses_nakamoto_blocks() {
            return None;
        }

        // dispatch on whether this sortition chose a winner
        let directive_opt = if sn.sortition {
            Some(self.choose_directive_sortition_with_winner(sn, &mining_pk, committed_index_hash))
        } else {
            self.choose_directive_sortition_without_winner(sn, &mining_pk)
        };
        debug!(
            "Relayer: Processed sortition {consensus_hash}: Miner directive is {directive_opt:?}"
        );
        directive_opt
    }
4,275✔
1006

1007
    /// Constructs and returns a LeaderKeyRegisterOp out of the provided params
1008
    fn make_key_register_op(
37✔
1009
        vrf_public_key: VRFPublicKey,
37✔
1010
        consensus_hash: &ConsensusHash,
37✔
1011
        miner_pkh: &Hash160,
37✔
1012
    ) -> BlockstackOperationType {
37✔
1013
        BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp {
37✔
1014
            public_key: vrf_public_key,
37✔
1015
            memo: miner_pkh.as_bytes().to_vec(),
37✔
1016
            consensus_hash: consensus_hash.clone(),
37✔
1017
            vtxindex: 0,
37✔
1018
            txid: Txid([0u8; 32]),
37✔
1019
            block_height: 0,
37✔
1020
            burn_header_hash: BurnchainHeaderHash::zero(),
37✔
1021
        })
37✔
1022
    }
37✔
1023

1024
    /// Create and broadcast a VRF public key registration transaction.
    ///
    /// Does nothing if a key registration is already in-flight
    /// (`self.last_vrf_key_burn_height` is set). On successful submission, records the
    /// pending registration in `self.globals` and bumps the submitted-VRFs counter;
    /// a failed submission leaves all state untouched so a later call can retry.
    ///
    /// * `burn_block` - the burnchain snapshot whose height seeds the VRF keypair and
    ///   whose consensus hash anchors the registration op
    pub fn rotate_vrf_and_register(&mut self, burn_block: &BlockSnapshot) {
        if self.last_vrf_key_burn_height.is_some() {
            // already in-flight
            return;
        }
        // epoch matters because the burnchain controller formats the op per-epoch
        let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), burn_block.block_height)
            .expect("FATAL: failed to query sortition DB")
            .expect("FATAL: no epoch defined")
            .epoch_id;
        // derive a deterministic VRF keypair from the burn block height
        let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height);
        let burnchain_tip_consensus_hash = &burn_block.consensus_hash;
        let miner_pkh = self.keychain.get_nakamoto_pkh();

        debug!(
            "Submitting LeaderKeyRegister";
            "vrf_pk" => vrf_pk.to_hex(),
            "burn_block_height" => burn_block.block_height,
            "miner_pkh" => miner_pkh.to_hex(),
        );

        let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh);

        let mut op_signer = self.keychain.generate_op_signer();
        if let Ok(txid) = self
            .bitcoin_controller
            .submit_operation(cur_epoch, op, &mut op_signer)
        {
            // advance key registration state
            self.last_vrf_key_burn_height = Some(burn_block.block_height);
            self.globals
                .set_pending_leader_key_registration(burn_block.block_height, txid);
            self.globals.counters.bump_naka_submitted_vrfs();
        }
    }
37✔
1060

1061
    /// Produce the block-commit for this upcoming tenure, if we can.
    ///
    /// Takes the Nakamoto chain tip (consensus hash, block header hash).
    ///
    /// Returns a `LastCommit` (holding the most recent burn snapshot, the most recent stacks
    /// tip, and the commit-op) on success.
    /// Returns an error if we fail somehow (e.g. the tenure-start header, VRF proof, parent
    /// block-commit, target epoch, or active VRF key cannot be found).
    ///
    /// TODO: unit test
    pub(crate) fn make_block_commit(
        &mut self,
        tip_block_ch: &ConsensusHash,
        tip_block_bh: &BlockHeaderHash,
    ) -> Result<LastCommit, NakamotoNodeError> {
        let tip_block_id = StacksBlockId::new(tip_block_ch, tip_block_bh);
        let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
            .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?;

        let stacks_tip = StacksBlockId::new(tip_block_ch, tip_block_bh);

        // sanity check -- this block must exist and have been processed locally
        let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header(
            &mut self.chainstate.index_conn(),
            &stacks_tip,
            tip_block_ch,
        )
        .map_err(|e| {
            error!(
                "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}"
            );
            NakamotoNodeError::ParentNotFound
        })?
        .ok_or_else(|| {
            error!(
                "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}"
            );
            NakamotoNodeError::ParentNotFound
        })?;

        // load the VRF proof generated in this tenure, so we can use it to seed the VRF in the
        // upcoming tenure.  This may be an epoch2x VRF proof.
        let tip_vrf_proof = NakamotoChainState::get_block_vrf_proof(
            &mut self.chainstate.index_conn(),
            &stacks_tip,
            tip_block_ch,
        )
        .map_err(|e| {
            error!("Failed to load VRF proof for {tip_block_ch} off of {stacks_tip}: {e:?}");
            NakamotoNodeError::ParentNotFound
        })?
        .ok_or_else(|| {
            error!("No block VRF proof for {tip_block_ch} off of {stacks_tip}");
            NakamotoNodeError::ParentNotFound
        })?;

        // let's figure out the recipient set!
        let recipients = get_nakamoto_next_recipients(
            &sort_tip,
            &mut self.sortdb,
            &mut self.chainstate,
            &stacks_tip,
            &self.burnchain,
        )
        .map_err(|e| {
            error!("Relayer: Failure fetching recipient set: {e:?}");
            NakamotoNodeError::SnapshotNotFoundForChainTip
        })?;

        // in the prepare phase, commits must burn rather than pay PoX recipients
        let commit_outs = if self
            .burnchain
            .is_in_prepare_phase(sort_tip.block_height + 1)
        {
            vec![PoxAddress::standard_burn_address(self.config.is_mainnet())]
        } else {
            RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet())
        };

        // find the sortition that kicked off this tenure (it may be different from the sortition
        // tip, such as when there is no sortition or when the miner of the current sortition never
        // produces a block).  This is used to find the parent block-commit of the block-commit
        // we'll submit.
        let Ok(Some(tip_tenure_sortition)) =
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), tip_block_ch)
        else {
            error!("Relayer: Failed to lookup the block snapshot of highest tenure ID"; "tenure_consensus_hash" => %tip_block_ch);
            return Err(NakamotoNodeError::ParentNotFound);
        };

        // find the parent block-commit of this commit, so we can find the parent vtxindex
        // if the parent is a shadow block, then the vtxindex would be 0.
        let commit_parent_block_burn_height = tip_tenure_sortition.block_height;
        let commit_parent_winning_vtxindex = if let Ok(Some(parent_winning_tx)) =
            SortitionDB::get_block_commit(
                self.sortdb.conn(),
                &tip_tenure_sortition.winning_block_txid,
                &tip_tenure_sortition.sortition_id,
            ) {
            parent_winning_tx.vtxindex
        } else {
            // no block-commit found: only legitimate if the parent is a shadow block,
            // which we verify via its block version before defaulting the vtxindex to 0
            debug!(
                "{}/{} ({}) must be a shadow block, since it has no block-commit",
                &tip_block_bh, &tip_block_ch, &tip_block_id
            );
            let Ok(Some(parent_version)) =
                NakamotoChainState::get_nakamoto_block_version(self.chainstate.db(), &tip_block_id)
            else {
                error!(
                    "Relayer: Failed to lookup block version of {}",
                    &tip_block_id
                );
                return Err(NakamotoNodeError::ParentNotFound);
            };

            if !NakamotoBlockHeader::is_shadow_block_version(parent_version) {
                error!(
                    "Relayer: parent block-commit of {} not found, and it is not a shadow block",
                    &tip_block_id
                );
                return Err(NakamotoNodeError::ParentNotFound);
            }

            0
        };

        // epoch in which this commit will be sent (affects how the burnchain client processes it)
        let Ok(Some(target_epoch)) =
            SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1)
        else {
            error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1);
            return Err(NakamotoNodeError::SnapshotNotFoundForChainTip);
        };

        let (_, burnchain_config) = self.check_burnchain_config_changed();

        // let's commit, but target the current burnchain tip with our modulus so the commit is
        // only valid if it lands in the targeted burnchain block height
        let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS)
            .map_err(|_| {
                error!("Relayer: Block mining modulus is not u8");
                NakamotoNodeError::UnexpectedChainState
            })?;

        // burnchain signer for this commit
        let sender = self.keychain.get_burnchain_signer();

        // VRF key this commit uses (i.e. the one we registered)
        let key = self
            .globals
            .get_leader_key_registration_state()
            .get_active()
            .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?;

        let commit = LeaderBlockCommitOp {
            // NOTE: to be filled in
            treatment: vec![],
            // NOTE: PoX sunset has been disabled prior to taking effect
            sunset_burn: 0,
            // block-commits in Nakamoto commit to the ongoing tenure's tenure-start block (which,
            // when processed, become the start-block of the tenure atop which this miner will
            // produce blocks)
            block_header_hash: BlockHeaderHash(
                highest_tenure_start_block_header.index_block_hash().0,
            ),
            // the rest of this is the same as epoch2x commits, modulo the new epoch marker
            burn_fee: burnchain_config.burn_fee_cap,
            apparent_sender: sender,
            key_block_ptr: u32::try_from(key.block_height)
                .expect("FATAL: burn block height exceeded u32"),
            key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"),
            memo: vec![STACKS_EPOCH_LATEST_MARKER],
            new_seed: VRFSeed::from_proof(&tip_vrf_proof),
            parent_block_ptr: u32::try_from(commit_parent_block_burn_height)
                .expect("FATAL: burn block height exceeded u32"),
            parent_vtxindex: u16::try_from(commit_parent_winning_vtxindex)
                .expect("FATAL: vtxindex exceeded u16"),
            burn_parent_modulus,
            commit_outs,

            // NOTE: to be filled in
            input: (Txid([0; 32]), 0),
            vtxindex: 0,
            txid: Txid([0u8; 32]),
            block_height: 0,
            burn_header_hash: BurnchainHeaderHash::zero(),
        };

        Ok(LastCommit::new(
            commit,
            sort_tip,
            stacks_tip,
            highest_tenure_start_block_header.consensus_hash,
            highest_tenure_start_block_header
                .anchored_header
                .block_hash(),
            target_epoch.epoch_id,
        ))
    }
2,724✔
1257

1258
    /// Test-only fault injection: block relayer miner startup while the
    /// `TEST_MINER_THREAD_STALL` flag is set, polling every 10ms until cleared.
    #[cfg(test)]
    fn fault_injection_stall_miner_startup() {
        // Fast path: nothing to do (and nothing to log) unless the flag is set.
        if !TEST_MINER_THREAD_STALL.get() {
            return;
        }
        // Log once up front, then spin until the testing directive is lifted.
        warn!("Relayer miner thread startup is stalled due to testing directive to stall the miner");
        while TEST_MINER_THREAD_STALL.get() {
            std::thread::sleep(std::time::Duration::from_millis(10));
        }
        warn!(
            "Relayer miner thread startup is no longer stalled due to testing directive. Continuing..."
        );
    }
1,316✔
1271

1272
    /// Production build: miner-startup stalling is a test-only fault injection,
    /// so this is a no-op.
    #[cfg(not(test))]
    fn fault_injection_stall_miner_startup() {}
1274

1275
    /// Test-only fault injection: block miner thread startup while the
    /// `TEST_MINER_THREAD_START_STALL` flag is set, polling every 10ms until cleared.
    #[cfg(test)]
    fn fault_injection_stall_miner_thread_startup() {
        // Fast path: nothing to do (and nothing to log) unless the flag is set.
        if !TEST_MINER_THREAD_START_STALL.get() {
            return;
        }
        // Log once up front, then spin until the testing directive is lifted.
        warn!("Miner thread startup is stalled due to testing directive");
        while TEST_MINER_THREAD_START_STALL.get() {
            std::thread::sleep(std::time::Duration::from_millis(10));
        }
        warn!(
            "Miner thread startup is no longer stalled due to testing directive. Continuing..."
        );
    }
1,316✔
1288

1289
    /// Production build: miner-thread-startup stalling is a test-only fault
    /// injection, so this is a no-op.
    #[cfg(not(test))]
    fn fault_injection_stall_miner_thread_startup() {}
1291

1292
    /// Create the block miner thread state.
1293
    /// Only proceeds if all of the following are true:
1294
    /// * the miner is not blocked
1295
    /// * last_burn_block corresponds to the canonical sortition DB's chain tip
1296
    /// * the time of issuance is sufficiently recent
1297
    /// * there are no unprocessed stacks blocks in the staging DB
1298
    /// * the relayer has already tried a download scan that included this sortition (which, if a block was found, would have placed it into the staging DB and marked it as unprocessed)
1299
    /// * a miner thread is not running already
1300
    fn create_block_miner(
1,316✔
1301
        &mut self,
1,316✔
1302
        registered_key: RegisteredKey,
1,316✔
1303
        burn_election_block: BlockSnapshot,
1,316✔
1304
        burn_tip: BlockSnapshot,
1,316✔
1305
        parent_tenure_id: StacksBlockId,
1,316✔
1306
        reason: MinerReason,
1,316✔
1307
        burn_tip_at_start: &ConsensusHash,
1,316✔
1308
    ) -> Result<BlockMinerThread, NakamotoNodeError> {
1,316✔
1309
        if fault_injection_skip_mining(&self.config.node.rpc_bind, burn_tip.block_height) {
1,316✔
1310
            debug!(
×
1311
                "Relayer: fault injection skip mining at block height {}",
1312
                burn_tip.block_height
1313
            );
1314
            return Err(NakamotoNodeError::FaultInjection);
×
1315
        }
1,316✔
1316
        Self::fault_injection_stall_miner_startup();
1,316✔
1317

1318
        let burn_header_hash = burn_tip.burn_header_hash.clone();
1,316✔
1319
        let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
1,316✔
1320
            .expect("FATAL: failed to query sortition DB for canonical burn chain tip");
1,316✔
1321

1322
        let burn_chain_tip = burn_chain_sn.burn_header_hash.clone();
1,316✔
1323

1324
        if &burn_chain_sn.consensus_hash != burn_tip_at_start {
1,316✔
1325
            info!(
×
1326
                "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}"
1327
            );
1328
            self.globals.counters.bump_missed_tenures();
×
1329
            return Err(NakamotoNodeError::MissedMiningOpportunity);
×
1330
        }
1,316✔
1331

1332
        debug!(
1,316✔
1333
            "Relayer: Spawn tenure thread";
1334
            "height" => burn_tip.block_height,
×
1335
            "burn_header_hash" => %burn_header_hash,
1336
            "parent_tenure_id" => %parent_tenure_id,
1337
            "reason" => %reason,
1338
            "burn_election_block.consensus_hash" => %burn_election_block.consensus_hash,
1339
            "burn_tip.consensus_hash" => %burn_tip.consensus_hash,
1340
        );
1341

1342
        let miner_thread_state = BlockMinerThread::new(
1,316✔
1343
            self,
1,316✔
1344
            registered_key,
1,316✔
1345
            burn_election_block,
1,316✔
1346
            burn_tip.clone(),
1,316✔
1347
            parent_tenure_id,
1,316✔
1348
            burn_tip_at_start,
1,316✔
1349
            reason,
1,316✔
1350
        )?;
×
1351
        Ok(miner_thread_state)
1,316✔
1352
    }
1,316✔
1353

1354
    fn start_new_tenure(
1,316✔
1355
        &mut self,
1,316✔
1356
        parent_tenure_start: StacksBlockId,
1,316✔
1357
        block_election_snapshot: BlockSnapshot,
1,316✔
1358
        burn_tip: BlockSnapshot,
1,316✔
1359
        reason: MinerReason,
1,316✔
1360
        burn_tip_at_start: &ConsensusHash,
1,316✔
1361
    ) -> Result<(), NakamotoNodeError> {
1,316✔
1362
        // when starting a new tenure, block the mining thread if its currently running.
1363
        // the new mining thread will join it (so that the new mining thread stalls, not the relayer)
1364
        let prior_tenure_thread = self.miner_thread.take();
1,316✔
1365
        self.miner_thread_burn_view = None;
1,316✔
1366

1367
        let vrf_key = self
1,316✔
1368
            .globals
1,316✔
1369
            .get_leader_key_registration_state()
1,316✔
1370
            .get_active()
1,316✔
1371
            .ok_or_else(|| {
1,316✔
1372
                warn!("Trying to start new tenure, but no VRF key active");
×
1373
                NakamotoNodeError::NoVRFKeyActive
×
1374
            })?;
×
1375
        let new_miner_state = self.create_block_miner(
1,316✔
1376
            vrf_key,
1,316✔
1377
            block_election_snapshot,
1,316✔
1378
            burn_tip.clone(),
1,316✔
1379
            parent_tenure_start.clone(),
1,316✔
1380
            reason,
1,316✔
1381
            burn_tip_at_start,
1,316✔
1382
        )?;
×
1383
        let miner_abort_flag = new_miner_state.get_abort_flag();
1,316✔
1384

1385
        debug!("Relayer: starting new tenure thread");
1,316✔
1386

1387
        let rand_id = thread_rng().gen::<u32>();
1,316✔
1388
        let is_mock = if self.config.node.mock_mining {
1,316✔
1389
            "mock-"
7✔
1390
        } else {
1391
            ""
1,309✔
1392
        };
1393

1394
        let new_miner_handle = std::thread::Builder::new()
1,316✔
1395
            .name(format!("{is_mock}miner.{parent_tenure_start}.{rand_id}",))
1,316✔
1396
            .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
1,316✔
1397
            .spawn(move || {
1,316✔
1398
                debug!(
1,316✔
1399
                    "New block miner thread ID is {:?}",
1400
                    std::thread::current().id()
×
1401
                );
1402
                Self::fault_injection_stall_miner_thread_startup();
1,316✔
1403
                if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) {
1,316✔
1404
                    info!("Miner thread failed: {e:?}");
1,316✔
1405
                    Err(e)
1,316✔
1406
                } else {
1407
                    Ok(())
×
1408
                }
1409
            })
1,316✔
1410
            .map_err(|e| {
1,316✔
1411
                error!("Relayer: Failed to start tenure thread: {e:?}");
×
1412
                NakamotoNodeError::SpawnError(e)
×
1413
            })?;
×
1414
        debug!(
1,316✔
1415
            "Relayer: started tenure thread ID {:?}",
1416
            new_miner_handle.thread().id()
×
1417
        );
1418
        self.miner_thread
1,316✔
1419
            .replace(MinerStopHandle::new(new_miner_handle, miner_abort_flag));
1,316✔
1420
        self.miner_thread_burn_view.replace(burn_tip);
1,316✔
1421
        Ok(())
1,316✔
1422
    }
1,316✔
1423

1424
    fn stop_tenure(&mut self) -> Result<(), NakamotoNodeError> {
307✔
1425
        // when stopping a tenure, block the mining thread if its currently running, then join it.
1426
        // do this in a new thread will (so that the new thread stalls, not the relayer)
1427
        let Some(prior_tenure_thread) = self.miner_thread.take() else {
307✔
1428
            debug!("Relayer: no tenure thread to stop");
98✔
1429
            return Ok(());
98✔
1430
        };
1431
        self.miner_thread_burn_view = None;
209✔
1432

1433
        let id = prior_tenure_thread.inner_thread().id();
209✔
1434
        let abort_flag = prior_tenure_thread.abort_flag.clone();
209✔
1435
        let globals = self.globals.clone();
209✔
1436

1437
        let stop_handle = std::thread::Builder::new()
209✔
1438
            .name(format!(
209✔
1439
                "tenure-stop({:?})-{}",
1440
                id, self.local_peer.data_url
1441
            ))
1442
            .spawn(move || prior_tenure_thread.stop(&globals))
209✔
1443
            .map_err(|e| {
209✔
1444
                error!("Relayer: Failed to spawn a stop-tenure thread: {e:?}");
×
1445
                NakamotoNodeError::SpawnError(e)
×
1446
            })?;
×
1447

1448
        self.miner_thread
209✔
1449
            .replace(MinerStopHandle::new(stop_handle, abort_flag));
209✔
1450
        debug!("Relayer: stopped tenure thread ID {id:?}");
209✔
1451
        Ok(())
209✔
1452
    }
307✔
1453

1454
    /// Get the public key hash for the mining key.
1455
    fn get_mining_key_pkh(&self) -> Option<Hash160> {
6,244✔
1456
        let Some(ref mining_key) = self.config.miner.mining_key else {
6,244✔
1457
            return None;
×
1458
        };
1459
        Some(Hash160::from_node_public_key(
6,244✔
1460
            &StacksPublicKey::from_private(mining_key),
6,244✔
1461
        ))
6,244✔
1462
    }
6,244✔
1463

1464
    /// Helper method to get the last snapshot with a winner
1465
    fn get_last_winning_snapshot(
410✔
1466
        sortdb: &SortitionDB,
410✔
1467
        sort_tip: &BlockSnapshot,
410✔
1468
    ) -> Result<BlockSnapshot, NakamotoNodeError> {
410✔
1469
        let ih = sortdb.index_handle(&sort_tip.sortition_id);
410✔
1470
        Ok(ih.get_last_snapshot_with_sortition(sort_tip.block_height)?)
410✔
1471
    }
410✔
1472

1473
    /// Returns true if the sortition `sn` commits to the tenure start block of the ongoing Stacks tenure `stacks_tip_sn`.
1474
    /// Returns false otherwise.
1475
    fn sortition_commits_to_stacks_tip_tenure(
1,344✔
1476
        chain_state: &mut StacksChainState,
1,344✔
1477
        stacks_tip_id: &StacksBlockId,
1,344✔
1478
        stacks_tip_sn: &BlockSnapshot,
1,344✔
1479
        sn: &BlockSnapshot,
1,344✔
1480
    ) -> Result<bool, NakamotoNodeError> {
1,344✔
1481
        if !sn.sortition {
1,344✔
1482
            // definitely not a valid sortition
1483
            debug!("Relayer: Sortition {} is empty", &sn.consensus_hash);
×
1484
            return Ok(false);
×
1485
        }
1,344✔
1486
        // The sortition must commit to the tenure start block of the ongoing Stacks tenure.
1487
        let mut ic = chain_state.index_conn();
1,344✔
1488
        let parent_tenure_id = StacksBlockId(sn.winning_stacks_block_hash.clone().0);
1,344✔
1489
        let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header(
1,344✔
1490
            &mut ic,
1,344✔
1491
            stacks_tip_id,
1,344✔
1492
            &stacks_tip_sn.consensus_hash,
1,344✔
1493
        )?
×
1494
        .ok_or_else(|| {
1,344✔
1495
            error!(
×
1496
                "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip_id}"
1497
            );
1498
            NakamotoNodeError::ParentNotFound
×
1499
        })?;
×
1500

1501
        let highest_tenure_start_block_id = highest_tenure_start_block_header.index_block_hash();
1,344✔
1502
        if highest_tenure_start_block_id != parent_tenure_id {
1,344✔
1503
            debug!("Relayer: Sortition {} is at the tip, but does not commit to {parent_tenure_id} so cannot be valid", &sn.consensus_hash;
123✔
1504
                "highest_tenure_start_block_header_block_id" => %highest_tenure_start_block_id);
1505
            return Ok(false);
123✔
1506
        }
1,221✔
1507

1508
        Ok(true)
1,221✔
1509
    }
1,344✔
1510

1511
    /// Determine the highest sortition higher than `elected_tenure_id`, but no higher than
1512
    /// `sort_tip` whose winning commit's parent tenure ID matches the `stacks_tip`,
1513
    /// and whose consensus hash matches the `stacks_tip`'s tenure ID.
1514
    ///
1515
    /// Returns Ok(true) if such a sortition is found, and is higher than that of
1516
    /// `elected_tenure_id`.
1517
    /// Returns Ok(false) if no such sortition is found.
1518
    /// Returns Err(..) on DB errors.
1519
    fn has_higher_sortition_commits_to_stacks_tip_tenure(
61✔
1520
        sortdb: &SortitionDB,
61✔
1521
        chain_state: &mut StacksChainState,
61✔
1522
        sortition_tip: &BlockSnapshot,
61✔
1523
        elected_tenure: &BlockSnapshot,
61✔
1524
    ) -> Result<bool, NakamotoNodeError> {
61✔
1525
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
61✔
1526
            SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap();
61✔
1527
        let canonical_stacks_tip =
61✔
1528
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
61✔
1529

1530
        let Ok(Some(canonical_stacks_tip_sn)) =
61✔
1531
            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &canonical_stacks_tip_ch)
61✔
1532
        else {
1533
            return Err(NakamotoNodeError::ParentNotFound);
×
1534
        };
1535

1536
        sortdb
61✔
1537
            .find_from(sortition_tip.clone(), |cursor| {
72✔
1538
                debug!(
72✔
1539
                    "Relayer: check sortition {} to see if it is valid",
1540
                    &cursor.consensus_hash
×
1541
                );
1542
                // have we reached the last tenure we're looking at?
1543
                if cursor.block_height <= elected_tenure.block_height {
72✔
1544
                    return Ok(FindIter::Halt);
11✔
1545
                }
61✔
1546

1547
                if Self::sortition_commits_to_stacks_tip_tenure(
61✔
1548
                    chain_state,
61✔
1549
                    &canonical_stacks_tip,
61✔
1550
                    &canonical_stacks_tip_sn,
61✔
1551
                    &cursor,
61✔
1552
                )? {
×
1553
                    return Ok(FindIter::Found(()));
50✔
1554
                }
11✔
1555

1556
                // nope. continue the search
1557
                return Ok(FindIter::Continue);
11✔
1558
            })
72✔
1559
            .map(|found| found.is_some())
61✔
1560
    }
61✔
1561

1562
    /// Attempt to continue a miner's tenure into the next burn block.
1563
    /// This is allowed if the miner won the last good sortition -- that is, the sortition which
1564
    /// elected the local view of the canonical Stacks fork's ongoing tenure.
1565
    /// Or if the miner won the last valid sortition prior to the current and the current miner
1566
    /// has failed to produce a block before the required timeout.
1567
    ///
1568
    /// This function assumes that the caller has checked that the sortition referred to by
1569
    /// `new_burn_view` does not have a sortition winner or that the winner has not produced a
1570
    /// valid block yet.
1571
    fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> {
97✔
1572
        if let Err(e) = self.stop_tenure() {
97✔
1573
            error!("Relayer: Failed to stop tenure: {e:?}");
×
1574
            return Ok(());
×
1575
        }
97✔
1576
        debug!("Relayer: successfully stopped tenure; will try to continue.");
97✔
1577

1578
        // try to extend, but only if we aren't already running a thread for the current or newer
1579
        // burnchain view
1580
        let Ok(sn) =
97✔
1581
            SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).inspect_err(|e| {
97✔
1582
                error!("Relayer: failed to read canonical burnchain sortition: {e:?}");
×
1583
            })
×
1584
        else {
1585
            return Ok(());
×
1586
        };
1587

1588
        if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() {
97✔
1589
            // a miner thread is already running.  If its burn view is the same as the canonical
1590
            // tip, then do nothing
1591
            if sn.consensus_hash == miner_thread_burn_view.consensus_hash {
×
1592
                info!("Relayer: will not tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %sn.consensus_hash);
×
1593
                return Ok(());
×
1594
            }
×
1595
        }
97✔
1596

1597
        // Get the necessary snapshots and state
1598
        let burn_tip =
97✔
1599
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view)?
97✔
1600
                .ok_or_else(|| {
97✔
1601
                    error!("Relayer: failed to get block snapshot for new burn view");
×
1602
                    NakamotoNodeError::SnapshotNotFoundForChainTip
×
1603
                })?;
×
1604
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
97✔
1605
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap();
97✔
1606
        let canonical_stacks_tip =
97✔
1607
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
97✔
1608
        let canonical_stacks_snapshot = SortitionDB::get_block_snapshot_consensus(
97✔
1609
            self.sortdb.conn(),
97✔
1610
            &canonical_stacks_tip_ch,
97✔
1611
        )?
×
1612
        .ok_or_else(|| {
97✔
1613
            error!("Relayer: failed to get block snapshot for canonical tip");
×
1614
            NakamotoNodeError::SnapshotNotFoundForChainTip
×
1615
        })?;
×
1616
        let reason = MinerReason::Extended {
97✔
1617
            burn_view_consensus_hash: new_burn_view.clone(),
97✔
1618
        };
97✔
1619

1620
        if let Err(e) = self.start_new_tenure(
97✔
1621
            canonical_stacks_tip.clone(),
97✔
1622
            canonical_stacks_snapshot.clone(),
97✔
1623
            burn_tip.clone(),
97✔
1624
            reason.clone(),
97✔
1625
            &new_burn_view,
97✔
1626
        ) {
97✔
1627
            error!("Relayer: Failed to start new tenure: {e:?}");
×
1628
        } else {
1629
            debug!("Relayer: successfully started new tenure.";
97✔
1630
                   "parent_tenure_start" => %canonical_stacks_tip,
1631
                   "burn_tip" => %burn_tip.consensus_hash,
1632
                   "burn_view_snapshot" => %burn_tip.consensus_hash,
1633
                   "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash,
1634
                   "reason" => %reason);
1635
        }
1636
        Ok(())
97✔
1637
    }
97✔
1638

1639
    fn handle_sortition(
4,275✔
1640
        &mut self,
4,275✔
1641
        consensus_hash: ConsensusHash,
4,275✔
1642
        burn_hash: BurnchainHeaderHash,
4,275✔
1643
        committed_index_hash: StacksBlockId,
4,275✔
1644
    ) -> bool {
4,275✔
1645
        let miner_instruction =
1,458✔
1646
            match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) {
4,275✔
1647
                Some(miner_instruction) => miner_instruction,
1,458✔
1648
                None => {
1649
                    return true;
2,817✔
1650
                }
1651
            };
1652

1653
        match miner_instruction {
1,458✔
1654
            MinerDirective::BeginTenure {
1655
                parent_tenure_start,
1,185✔
1656
                burnchain_tip,
1,185✔
1657
                election_block,
1,185✔
1658
                late,
1,185✔
1659
            } => match self.start_new_tenure(
1,185✔
1660
                parent_tenure_start.clone(),
1,185✔
1661
                election_block.clone(),
1,185✔
1662
                election_block.clone(),
1,185✔
1663
                MinerReason::BlockFound { late },
1,185✔
1664
                &burnchain_tip.consensus_hash,
1,185✔
1665
            ) {
1,185✔
1666
                Ok(()) => {
1667
                    debug!("Relayer: successfully started new tenure.";
1,185✔
1668
                           "parent_tenure_start" => %parent_tenure_start,
1669
                           "burn_tip" => %burnchain_tip.consensus_hash,
1670
                           "burn_view_snapshot" => %burnchain_tip.consensus_hash,
1671
                           "block_election_snapshot" => %burnchain_tip.consensus_hash,
1672
                           "reason" => %MinerReason::BlockFound { late });
×
1673
                }
1674
                Err(e) => {
×
1675
                    error!("Relayer: Failed to start new tenure: {e:?}");
×
1676
                }
1677
            },
1678
            MinerDirective::ContinueTenure { new_burn_view } => {
97✔
1679
                match self.continue_tenure(new_burn_view) {
97✔
1680
                    Ok(()) => {
1681
                        debug!("Relayer: successfully handled continue tenure.");
97✔
1682
                    }
1683
                    Err(e) => {
×
1684
                        error!("Relayer: Failed to continue tenure: {e:?}");
×
1685
                        return false;
×
1686
                    }
1687
                }
1688
            }
1689
            MinerDirective::StopTenure => match self.stop_tenure() {
176✔
1690
                Ok(()) => {
1691
                    debug!("Relayer: successfully stopped tenure.");
176✔
1692
                }
1693
                Err(e) => {
×
1694
                    error!("Relayer: Failed to stop tenure: {e:?}");
×
1695
                }
1696
            },
1697
        }
1698

1699
        self.globals.counters.bump_naka_miner_directives();
1,458✔
1700
        true
1,458✔
1701
    }
4,275✔
1702

1703
/// Test-only: report whether a testing directive asked us to skip submitting
/// block-commits to the burnchain.
#[cfg(test)]
fn fault_injection_skip_block_commit(&self) -> bool {
    self.globals.counters.skip_commit_op.get()
}
90,263✔
1707

1708
/// Always false in non-test builds: block-commits are never skipped.
#[cfg(not(test))]
fn fault_injection_skip_block_commit(&self) -> bool {
    false
}
1712

1713
/// Get the canonical tip for the miner to commit to.
/// This is provided as a separate function so that it can be overridden for testing.
/// In non-test builds there is no override, so always returns `None`.
#[cfg(not(test))]
fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> {
    None
}
1719

1720
/// Test-only override of the tip to commit to; returns whatever tip, if any,
/// a test has installed via `TEST_MINER_COMMIT_TIP`.
#[cfg(test)]
fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> {
    TEST_MINER_COMMIT_TIP.get()
}
2,724✔
1724

1725
    fn get_commit_for_tip(&mut self) -> Result<(ConsensusHash, BlockHeaderHash), DbError> {
2,724✔
1726
        if let Some((consensus_hash, block_header_hash)) = self.fault_injection_get_tip_for_commit()
2,724✔
1727
        {
1728
            info!("Relayer: using test tip for commit";
108✔
1729
                "consensus_hash" => %consensus_hash,
1730
                "block_header_hash" => %block_header_hash,
1731
            );
1732
            Ok((consensus_hash, block_header_hash))
108✔
1733
        } else {
1734
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
2,616✔
1735
        }
1736
    }
2,724✔
1737

1738
    /// Generate and submit the next block-commit, and record it locally
1739
    fn issue_block_commit(&mut self) -> Result<(), NakamotoNodeError> {
90,263✔
1740
        if self.fault_injection_skip_block_commit() {
90,263✔
1741
            debug!(
87,539✔
1742
                "Relayer: not submitting block-commit to bitcoin network due to test directive."
1743
            );
1744
            return Ok(());
87,539✔
1745
        }
2,724✔
1746
        let (tip_block_ch, tip_block_bh) = self.get_commit_for_tip().unwrap_or_else(|e| {
2,724✔
1747
            panic!("Failed to load canonical stacks tip: {e:?}");
×
1748
        });
1749
        let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?;
2,724✔
1750

1751
        let Some(tip_height) = NakamotoChainState::get_block_header(
2,724✔
1752
            self.chainstate.db(),
2,724✔
1753
            &StacksBlockId::new(&tip_block_ch, &tip_block_bh),
2,724✔
1754
        )
1755
        .map_err(|e| {
2,724✔
1756
            warn!("Relayer: failed to load tip {tip_block_ch}/{tip_block_bh}: {e:?}");
×
1757
            NakamotoNodeError::ParentNotFound
×
1758
        })?
×
1759
        .map(|header| header.stacks_block_height) else {
2,724✔
1760
            warn!(
×
1761
                "Relayer: failed to load height for tip {tip_block_ch}/{tip_block_bh} (got None)"
1762
            );
1763
            return Err(NakamotoNodeError::ParentNotFound);
×
1764
        };
1765

1766
        // sign and broadcast
1767
        let mut op_signer = self.keychain.generate_op_signer();
2,724✔
1768
        let res = self.bitcoin_controller.submit_operation(
2,724✔
1769
            *last_committed.get_epoch_id(),
2,724✔
1770
            BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()),
2,724✔
1771
            &mut op_signer,
2,724✔
1772
        );
1773
        let txid = match res {
2,724✔
1774
            Ok(txid) => txid,
1,648✔
1775
            Err(e) => {
1,076✔
1776
                if self.config.node.mock_mining {
1,076✔
1777
                    debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction");
972✔
1778
                    return Ok(());
972✔
1779
                }
104✔
1780
                warn!("Failed to submit block-commit bitcoin transaction: {e}");
104✔
1781
                return Err(NakamotoNodeError::BurnchainSubmissionFailed(e));
104✔
1782
            }
1783
        };
1784

1785
        info!(
1,648✔
1786
            "Relayer: Submitted block-commit";
1787
            "tip_consensus_hash" => %tip_block_ch,
1788
            "tip_block_hash" => %tip_block_bh,
1789
            "tip_height" => %tip_height,
1790
            "tip_block_id" => %StacksBlockId::new(&tip_block_ch, &tip_block_bh),
1,647✔
1791
            "txid" => %txid,
1792
        );
1793

1794
        // update local state
1795
        last_committed.set_txid(&txid);
1,648✔
1796
        self.globals.counters.bump_naka_submitted_commits(
1,648✔
1797
            last_committed.burn_tip.block_height,
1,648✔
1798
            tip_height,
1,648✔
1799
            last_committed.block_commit.burn_fee,
1,648✔
1800
            &last_committed.tenure_consensus_hash,
1,648✔
1801
        );
1802
        self.last_committed = Some(last_committed);
1,648✔
1803

1804
        Ok(())
1,648✔
1805
    }
90,263✔
1806

1807
    /// Determine what the relayer should do to advance the chain.
1808
    /// * If this isn't a miner, then it's always nothing.
1809
    /// * Otherwise, if we haven't done so already, go register a VRF public key
1810
    /// * If the stacks chain tip or burnchain tip has changed, then issue a block-commit
1811
    /// * If the last burn view we started a miner for is not the canonical burn view, then
1812
    /// try and start a new tenure (or continue an existing one).
1813
    fn initiative(&mut self) -> Result<Option<RelayerDirective>, NakamotoNodeError> {
305,604✔
1814
        if !self.is_miner {
305,604✔
1815
            return Ok(None);
68,512✔
1816
        }
237,092✔
1817

1818
        match self.globals.get_leader_key_registration_state() {
237,092✔
1819
            // do we need a VRF key registration?
1820
            LeaderKeyRegistrationState::Inactive => {
1821
                let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())?;
38✔
1822
                return Ok(Some(RelayerDirective::RegisterKey(sort_tip)));
38✔
1823
            }
1824
            // are we still waiting on a pending registration?
1825
            LeaderKeyRegistrationState::Pending(..) => {
1826
                return Ok(None);
4,100✔
1827
            }
1828
            LeaderKeyRegistrationState::Active(_) => {}
232,954✔
1829
        };
1830

1831
        // load up canonical sortition and stacks tips
1832
        let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())?;
232,954✔
1833

1834
        // NOTE: this may be an epoch2x tip
1835
        let (stacks_tip_ch, stacks_tip_bh) =
232,954✔
1836
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())?;
232,954✔
1837
        let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh);
232,954✔
1838

1839
        // check stacks and sortition tips to see if any chainstate change has happened.
1840
        // did our view of the sortition history change?
1841
        // if so, then let's try and confirm the highest tenure so far.
1842
        let burnchain_changed = self
232,954✔
1843
            .last_committed
232,954✔
1844
            .as_ref()
232,954✔
1845
            .map(|cmt| cmt.get_burn_tip().consensus_hash != sort_tip.consensus_hash)
232,954✔
1846
            .unwrap_or(true);
232,954✔
1847

1848
        let highest_tenure_changed = self
232,954✔
1849
            .last_committed
232,954✔
1850
            .as_ref()
232,954✔
1851
            .map(|cmt| cmt.get_tenure_id() != &stacks_tip_ch)
232,954✔
1852
            .unwrap_or(true);
232,954✔
1853

1854
        debug!("Relayer: initiative to commit";
232,954✔
1855
               "sortititon tip" => %sort_tip.consensus_hash,
1856
               "stacks tip" => %stacks_tip,
1857
               "stacks_tip_ch" => %stacks_tip_ch,
1858
               "stacks_tip_bh" => %stacks_tip_bh,
1859
               "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_tip().consensus_hash.to_string()).unwrap_or("(not set)".to_string()),
×
1860
               "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()),
×
1861
               "burnchain view changed?" => %burnchain_changed,
1862
               "highest tenure changed?" => %highest_tenure_changed);
1863

1864
        // If the miner spend or config has changed, we want to RBF with new config values.
1865
        let (burnchain_config_changed, _) = self.check_burnchain_config_changed();
232,954✔
1866
        let miner_config_changed = self.check_miner_config_changed();
232,954✔
1867

1868
        if burnchain_config_changed || miner_config_changed {
232,954✔
1869
            info!("Miner spend or config changed; issuing block commit with new values";
2✔
1870
                "miner_spend_changed" => %burnchain_config_changed,
1871
                "miner_config_changed" => %miner_config_changed,
1872
            );
1873
            return Ok(Some(RelayerDirective::IssueBlockCommit(
2✔
1874
                stacks_tip_ch,
2✔
1875
                stacks_tip_bh,
2✔
1876
            )));
2✔
1877
        }
232,952✔
1878

1879
        if !burnchain_changed && !highest_tenure_changed {
232,952✔
1880
            // nothing to do
1881
            return Ok(None);
107,939✔
1882
        }
125,013✔
1883

1884
        if highest_tenure_changed {
125,013✔
1885
            // highest-tenure view changed, so we need to send (or RBF) a commit
1886
            return Ok(Some(RelayerDirective::IssueBlockCommit(
79,695✔
1887
                stacks_tip_ch,
79,695✔
1888
                stacks_tip_bh,
79,695✔
1889
            )));
79,695✔
1890
        }
45,318✔
1891

1892
        debug!("Relayer: burnchain view changed, but highest tenure did not");
45,318✔
1893
        // First, check if the changed burnchain view includes any
1894
        // sortitions. If it doesn't submit a block commit immediately.
1895
        //
1896
        // If it does, then wait a bit for the first block in the new
1897
        // tenure to arrive. This is to avoid submitting a block
1898
        // commit that will be immediately RBFed when the first
1899
        // block arrives.
1900
        if let Some(last_committed) = self.last_committed.as_ref() {
45,318✔
1901
            // check if all the sortitions after `last_tenure` are empty sortitions. if they are,
1902
            //  we don't need to wait at all to submit a commit
1903
            let last_tenure_tip_height = SortitionDB::get_consensus_hash_height(
45,318✔
1904
                &self.sortdb,
45,318✔
1905
                last_committed.get_tenure_id(),
45,318✔
1906
            )?
×
1907
            .ok_or_else(|| NakamotoNodeError::ParentNotFound)?;
45,318✔
1908
            let no_sortitions_after_last_tenure = self
45,318✔
1909
                .sortdb
45,318✔
1910
                .find_in_canonical::<_, _, NakamotoNodeError>(|cursor| {
47,539✔
1911
                    if cursor.block_height <= last_tenure_tip_height {
47,539✔
1912
                        return Ok(FindIter::Halt);
79✔
1913
                    }
47,460✔
1914
                    if cursor.sortition {
47,460✔
1915
                        return Ok(FindIter::Found(()));
45,239✔
1916
                    }
2,221✔
1917
                    Ok(FindIter::Continue)
2,221✔
1918
                })?
47,539✔
1919
                .is_none();
45,318✔
1920
            if no_sortitions_after_last_tenure {
45,318✔
1921
                return Ok(Some(RelayerDirective::IssueBlockCommit(
79✔
1922
                    stacks_tip_ch,
79✔
1923
                    stacks_tip_bh,
79✔
1924
                )));
79✔
1925
            }
45,239✔
1926
        }
×
1927

1928
        if self.new_tenure_timeout.is_ready(
45,239✔
1929
            &sort_tip.consensus_hash,
45,239✔
1930
            &self.config.miner.block_commit_delay,
45,239✔
1931
        ) {
1932
            return Ok(Some(RelayerDirective::IssueBlockCommit(
10,487✔
1933
                stacks_tip_ch,
10,487✔
1934
                stacks_tip_bh,
10,487✔
1935
            )));
10,487✔
1936
        } else {
1937
            if let Some(deadline) = self
34,752✔
1938
                .new_tenure_timeout
34,752✔
1939
                .deadline(&self.config.miner.block_commit_delay)
34,752✔
1940
            {
34,752✔
1941
                self.next_initiative = std::cmp::min(self.next_initiative, deadline);
34,752✔
1942
            }
34,752✔
1943

1944
            return Ok(None);
34,752✔
1945
        }
1946
    }
305,604✔
1947

1948
    /// Try to start up a tenure-extend if the tenure_extend_time has expired.
1949
    ///
1950
    /// Will check if the tenure-extend time was set and has expired. If so, will
1951
    /// check if the current miner thread needs to issue a BlockFound or if it can
1952
    /// immediately tenure-extend.
1953
    ///
1954
    /// Note: tenure_extend_time is only set to Some(_) if during sortition processing, the sortition
1955
    /// winner commit is corrupted or the winning miner has yet to produce a block.
1956
    fn check_tenure_timers(&mut self) {
411,754✔
1957
        // Should begin a tenure-extend?
1958
        let Some(tenure_extend_time) = self.tenure_extend_time.clone() else {
411,754✔
1959
            // No tenure extend time set, so nothing to do.
1960
            return;
353,316✔
1961
        };
1962
        if !tenure_extend_time.should_extend() {
58,438✔
1963
            test_debug!(
58,163✔
1964
                "Relayer: will not try to tenure-extend yet ({} <= {})",
1965
                tenure_extend_time.elapsed().as_secs(),
×
1966
                tenure_extend_time.timeout().as_secs()
×
1967
            );
1968
            return;
58,163✔
1969
        }
275✔
1970

1971
        let Some(mining_pkh) = self.get_mining_key_pkh() else {
275✔
1972
            // This shouldn't really ever hit, but just in case.
1973
            warn!("Will not tenure extend -- no mining key");
×
1974
            // If we don't have a mining key set, don't bother checking again.
1975
            self.tenure_extend_time = None;
×
1976
            return;
×
1977
        };
1978
        // reset timer so we can try again if for some reason a miner was already running (e.g. a
1979
        // blockfound from earlier).
1980
        self.tenure_extend_time
275✔
1981
            .as_mut()
275✔
1982
            .map(|t| t.refresh(self.config.miner.tenure_extend_poll_timeout));
275✔
1983
        // try to extend, but only if we aren't already running a thread for the current or newer
1984
        // burnchain view
1985
        let Ok(burn_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
275✔
1986
            .inspect_err(|e| {
275✔
1987
                error!("Failed to read canonical burnchain sortition: {e:?}");
×
1988
            })
×
1989
        else {
1990
            return;
×
1991
        };
1992

1993
        if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() {
275✔
1994
            // a miner thread is already running.  If its burn view is the same as the canonical
1995
            // tip, then do nothing for now
1996
            if burn_tip.consensus_hash == miner_thread_burn_view.consensus_hash {
256✔
1997
                info!("Will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %burn_tip.consensus_hash);
×
1998
                // Do not reset the timer, as we may be able to extend later.
1999
                return;
×
2000
            }
256✔
2001
        }
19✔
2002

2003
        let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
275✔
2004
            SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
275✔
2005
                .expect("FATAL: failed to query sortition DB for stacks tip");
275✔
2006
        let canonical_stacks_tip =
275✔
2007
            StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);
275✔
2008
        let canonical_stacks_snapshot =
275✔
2009
            SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
275✔
2010
                .expect("FATAL: failed to query sortiiton DB for epoch")
275✔
2011
                .expect("FATAL: no sortition for canonical stacks tip");
275✔
2012

2013
        match tenure_extend_time.reason() {
275✔
2014
            TenureExtendReason::BadSortitionWinner | TenureExtendReason::EmptySortition => {
2015
                // Before we try to extend, check if we need to issue a BlockFound
2016
                let Ok(last_winning_snapshot) =
267✔
2017
                    Self::get_last_winning_snapshot(&self.sortdb, &burn_tip).inspect_err(|e| {
267✔
2018
                        warn!("Failed to load last winning snapshot: {e:?}");
×
2019
                    })
×
2020
                else {
2021
                    // this should be unreachable, but don't tempt fate.
2022
                    info!("No prior snapshots have a winning sortition. Will not try to mine.");
×
2023
                    self.tenure_extend_time = None;
×
2024
                    return;
×
2025
                };
2026
                let won_last_winning_snapshot =
267✔
2027
                    last_winning_snapshot.miner_pk_hash.as_ref() == Some(&mining_pkh);
267✔
2028
                if won_last_winning_snapshot
267✔
2029
                    && Self::need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot)
256✔
2030
                {
2031
                    info!("Will not tenure extend yet -- need to issue a BlockFound first");
240✔
2032
                    // We may manage to extend later, so don't set the timer to None.
2033
                    return;
240✔
2034
                }
27✔
2035
            }
2036
            TenureExtendReason::UnresponsiveWinner => {}
8✔
2037
        }
2038

2039
        let won_ongoing_tenure_sortition =
35✔
2040
            canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(&mining_pkh);
35✔
2041
        if !won_ongoing_tenure_sortition {
35✔
2042
            debug!("Will not tenure extend. Did not win ongoing tenure sortition";
1✔
2043
                "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash,
2044
                "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch,
2045
                "burn_chain_sortition_tip_mining_pk" => ?burn_tip.miner_pk_hash,
2046
                "mining_pk" => %mining_pkh
2047
            );
2048
            self.tenure_extend_time = None;
1✔
2049
            return;
1✔
2050
        }
34✔
2051
        // If we reach this code, we have either won the last winning snapshot and have already issued a block found for it and should extend.
2052
        // OR we did not win the last snapshot, but the person who did has failed to produce a block and we should extend our old tenure.
2053
        if let Err(e) = self.stop_tenure() {
34✔
2054
            error!("Relayer: Failed to stop tenure: {e:?}");
×
2055
            return;
×
2056
        }
34✔
2057
        let reason = MinerReason::Extended {
34✔
2058
            burn_view_consensus_hash: burn_tip.consensus_hash.clone(),
34✔
2059
        };
34✔
2060
        debug!("Relayer: successfully stopped tenure; will try to continue.");
34✔
2061
        if let Err(e) = self.start_new_tenure(
34✔
2062
            canonical_stacks_tip.clone(),
34✔
2063
            canonical_stacks_snapshot.clone(),
34✔
2064
            burn_tip.clone(),
34✔
2065
            reason.clone(),
34✔
2066
            &burn_tip.consensus_hash,
34✔
2067
        ) {
34✔
2068
            error!("Relayer: Failed to start new tenure: {e:?}");
×
2069
        } else {
2070
            debug!("Relayer: successfully started new tenure.";
34✔
2071
                   "parent_tenure_start" => %canonical_stacks_tip,
2072
                   "burn_tip" => %burn_tip.consensus_hash,
2073
                   "burn_view_snapshot" => %burn_tip.consensus_hash,
2074
                   "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash,
2075
                   "reason" => %reason);
2076
            self.tenure_extend_time = None;
34✔
2077
        }
2078
    }
411,754✔
2079

2080
    /// Main loop of the relayer.
    /// Runs in a separate thread.
    /// Continuously receives from `relay_rcv`.
    /// Wakes up once per second to see if we need to continue mining an ongoing tenure.
    ///
    /// On each pass: check tenure-extend timers, run `initiative()` if either an
    /// initiative was raised or `next_initiative` elapsed, then fall back to a
    /// bounded `recv` on the directive channel.  Exits when `keep_running()` goes
    /// false or a directive handler asks to stop; on exit, blocks the miner and
    /// signals global shutdown so sibling threads terminate too.
    pub fn main(mut self, relay_rcv: Receiver<RelayerDirective>) {
        debug!("relayer thread ID is {:?}", std::thread::current().id());

        self.next_initiative =
            Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay);

        // how often we perform a loop pass below
        let poll_frequency_ms = 1_000;

        while self.globals.keep_running() {
            self.check_tenure_timers();
            let raised_initiative = self.globals.take_initiative();
            let timed_out = Instant::now() >= self.next_initiative;
            let initiative_directive = if raised_initiative.is_some() || timed_out {
                // reset the deadline before running initiative(), so a slow
                // initiative() doesn't immediately re-trigger on the next pass
                self.next_initiative =
                    Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay);
                self.initiative()
                    .inspect_err(|e| {
                        error!("Error while getting directive from initiative()"; "err" => ?e);
                    })
                    .ok()
                    .flatten()
            } else {
                None
            };

            let directive_opt = initiative_directive.or_else(|| {
                // do a time-bound recv on the relayer channel so that we can hit the `initiative()` invocation
                //  and keep_running() checks on each loop iteration
                match relay_rcv.recv_timeout(Duration::from_millis(poll_frequency_ms)) {
                    Ok(directive) => {
                        // only do this once, so we can call .initiative() again
                        Some(directive)
                    }
                    Err(RecvTimeoutError::Timeout) => None,
                    Err(RecvTimeoutError::Disconnected) => {
                        warn!("Relayer receive channel disconnected. Exiting relayer thread");
                        Some(RelayerDirective::Exit)
                    }
                }
            });

            if let Some(directive) = directive_opt {
                debug!("Relayer: main loop directive";
                       "directive" => %directive,
                       "raised_initiative" => ?raised_initiative,
                       "timed_out" => %timed_out);

                // handle_directive() returns false when the relayer should shut down
                if !self.handle_directive(directive) {
                    break;
                }
            }
        }

        // kill miner if it's running
        signal_mining_blocked(self.globals.get_miner_status());

        // set termination flag so other threads die
        self.globals.signal_stop();

        debug!("Relayer exit!");
    }
2146

2147
    /// Try loading up a saved VRF key
2148
    pub(crate) fn load_saved_vrf_key(path: &str, pubkey_hash: &Hash160) -> Option<RegisteredKey> {
43✔
2149
        let mut f = match fs::File::open(path) {
43✔
2150
            Ok(f) => f,
42✔
2151
            Err(e) => {
1✔
2152
                warn!("Could not open {path}: {e:?}");
1✔
2153
                return None;
1✔
2154
            }
2155
        };
2156
        let mut registered_key_bytes = vec![];
42✔
2157
        if let Err(e) = f.read_to_end(&mut registered_key_bytes) {
42✔
2158
            warn!("Failed to read registered key bytes from {path}: {e:?}");
×
2159
            return None;
×
2160
        }
42✔
2161

2162
        let Ok(registered_key) = serde_json::from_slice::<RegisteredKey>(&registered_key_bytes)
42✔
2163
        else {
2164
            warn!("Did not load registered key from {path}: could not decode JSON");
2✔
2165
            return None;
2✔
2166
        };
2167

2168
        // Check that the loaded key's memo matches the current miner's key
2169
        if registered_key.memo != pubkey_hash.as_ref() {
40✔
2170
            warn!("Loaded VRF key does not match mining key");
38✔
2171
            return None;
38✔
2172
        }
2✔
2173

2174
        info!("Loaded registered key from {path}");
2✔
2175
        Some(registered_key)
2✔
2176
    }
43✔
2177

2178
    /// Top-level dispatcher
    ///
    /// Handles one [`RelayerDirective`] and returns `true` if the relayer main
    /// loop should keep running, `false` if it should exit (only the `Exit`
    /// directive returns `false`).  Miner-only directives (`RegisterKey`,
    /// `ProcessedBurnBlock`, `IssueBlockCommit`) are no-ops when this node is
    /// not a miner or is still in initial block download; note those paths use
    /// early `return true`, which skips the trailing "handled directive" log.
    pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool {
        debug!("Relayer: handling directive"; "directive" => %directive);
        let continue_running = match directive {
            RelayerDirective::HandleNetResult(net_result) => {
                self.process_network_result(net_result);
                true
            }
            // RegisterKey directives mean that the relayer should try to register a new VRF key.
            // These are triggered by the relayer waking up without an active VRF key.
            RelayerDirective::RegisterKey(last_burn_block) => {
                if !self.is_miner {
                    return true;
                }
                if self.globals.in_initial_block_download() {
                    info!("In initial block download, will not submit VRF registration");
                    return true;
                }
                // Prefer resuming a previously-activated VRF key saved on disk
                // (if configured) over burning funds to register a fresh one.
                let mut saved_key_opt = None;
                if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() {
                    saved_key_opt =
                        Self::load_saved_vrf_key(path, &self.keychain.get_nakamoto_pkh());
                }
                if let Some(saved_key) = saved_key_opt {
                    debug!("Relayer: resuming VRF key");
                    self.globals.resume_leader_key(saved_key);
                } else {
                    self.rotate_vrf_and_register(&last_burn_block);
                    debug!("Relayer: directive Registered VRF key");
                }
                self.globals.counters.bump_blocks_processed();
                true
            }
            // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring.
            //  relayer should invoke `handle_sortition` to determine if they won the sortition,
            //  and to start their miner, or stop their miner if an active tenure is now ending
            RelayerDirective::ProcessedBurnBlock(consensus_hash, burn_hash, block_header_hash) => {
                if !self.is_miner {
                    return true;
                }
                if self.globals.in_initial_block_download() {
                    debug!("In initial block download, will not check sortition for miner");
                    return true;
                }
                // handle_sortition returns whether the relayer should keep running
                self.handle_sortition(
                    consensus_hash,
                    burn_hash,
                    StacksBlockId(block_header_hash.0),
                )
            }
            // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block
            RelayerDirective::IssueBlockCommit(..) => {
                if !self.is_miner {
                    return true;
                }
                if self.globals.in_initial_block_download() {
                    debug!("In initial block download, will not issue block commit");
                    return true;
                }
                // a failed commit is logged but not fatal to the relayer
                if let Err(e) = self.issue_block_commit() {
                    warn!("Relayer failed to issue block commit"; "err" => ?e);
                }
                true
            }
            RelayerDirective::Exit => false,
        };
        debug!("Relayer: handled directive"; "continue_running" => continue_running);
        continue_running
    }
2247

2248
    /// Reload config.burnchain to see if burn_fee_cap has changed.
2249
    /// If it has, update the miner spend amount and return true.
2250
    pub fn check_burnchain_config_changed(&self) -> (bool, BurnchainConfig) {
235,677✔
2251
        let burnchain_config = self.config.get_burnchain_config();
235,677✔
2252
        let last_burnchain_config_opt = self.globals.get_last_burnchain_config();
235,677✔
2253
        let burnchain_config_changed =
235,677✔
2254
            if let Some(last_burnchain_config) = last_burnchain_config_opt {
235,677✔
2255
                last_burnchain_config != burnchain_config
235,442✔
2256
            } else {
2257
                false
235✔
2258
            };
2259

2260
        self.globals
235,677✔
2261
            .set_last_miner_spend_amount(burnchain_config.burn_fee_cap);
235,677✔
2262
        self.globals
235,677✔
2263
            .set_last_burnchain_config(burnchain_config.clone());
235,677✔
2264

2265
        set_mining_spend_amount(
235,677✔
2266
            self.globals.get_miner_status(),
235,677✔
2267
            burnchain_config.burn_fee_cap,
235,677✔
2268
        );
2269

2270
        (burnchain_config_changed, burnchain_config)
235,677✔
2271
    }
235,677✔
2272

2273
    pub fn check_miner_config_changed(&self) -> bool {
232,954✔
2274
        let miner_config = self.config.get_miner_config();
232,954✔
2275
        let last_miner_config_opt = self.globals.get_last_miner_config();
232,954✔
2276
        let miner_config_changed = if let Some(last_miner_config) = last_miner_config_opt {
232,954✔
2277
            last_miner_config != miner_config
232,719✔
2278
        } else {
2279
            false
235✔
2280
        };
2281

2282
        self.globals.set_last_miner_config(miner_config);
232,954✔
2283

2284
        miner_config_changed
232,954✔
2285
    }
232,954✔
2286
}
2287

2288
#[cfg(test)]
pub mod test {
    use std::fs::File;
    use std::io::Write;
    use std::path::Path;
    use std::time::Duration;

    use rand::{thread_rng, Rng};
    use stacks::burnchains::Txid;
    use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash};
    use stacks::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, TrieHash};
    use stacks::util::hash::Hash160;
    use stacks::util::secp256k1::Secp256k1PublicKey;
    use stacks::util::vrf::VRFPublicKey;

    use super::{BurnBlockCommitTimer, RelayerThread};
    use crate::nakamoto_node::save_activated_vrf_key;
    use crate::run_loop::RegisteredKey;
    use crate::Keychain;

    /// Build a unique scratch path in the system temp dir.  Each test uses
    /// its own file name so the key-file tests cannot race each other when
    /// cargo runs them in parallel (previously two tests shared
    /// `/tmp/vrf_key.json`), and `env::temp_dir()` is portable off Unix.
    fn tmp_path(file_name: &str) -> String {
        std::env::temp_dir().join(file_name).display().to_string()
    }

    /// Derive the Nakamoto mining public key hash for a keychain built from `seed`.
    fn pubkey_hash_for_seed(seed: Vec<u8>) -> Hash160 {
        let keychain = Keychain::default(seed);
        let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk());
        Hash160::from_node_public_key(&pk)
    }

    /// Build a fixed `RegisteredKey` whose memo matches `pubkey_hash`.
    fn registered_key_with_memo(pubkey_hash: &Hash160) -> RegisteredKey {
        RegisteredKey {
            target_block_height: 101,
            block_height: 102,
            op_vtxindex: 1,
            vrf_public_key: VRFPublicKey::from_hex(
                "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71",
            )
            .unwrap(),
            memo: pubkey_hash.as_ref().to_vec(),
        }
    }

    #[test]
    fn load_nonexistent_vrf_key() {
        let pubkey_hash = pubkey_hash_for_seed(vec![0u8; 32]);

        let path = tmp_path("relayer_load_nonexistent_vrf_key.json");
        _ = std::fs::remove_file(&path);

        let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash);
        assert!(res.is_none());
    }

    #[test]
    fn load_empty_vrf_key() {
        let pubkey_hash = pubkey_hash_for_seed(vec![0u8; 32]);

        let path = tmp_path("relayer_load_empty_vrf_key.json");
        File::create(&path).expect("Failed to create test file");
        assert!(Path::new(&path).exists());

        // An empty file is not valid JSON, so the load must fail.
        let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash);
        assert!(res.is_none());

        std::fs::remove_file(&path).expect("Failed to delete test file");
    }

    #[test]
    fn load_bad_vrf_key() {
        let pubkey_hash = pubkey_hash_for_seed(vec![0u8; 32]);

        let path = tmp_path("relayer_load_bad_vrf_key.json");
        let json_content = r#"{ "hello": "world" }"#;

        // Write JSON that does not decode into a RegisteredKey
        let mut file = File::create(&path).expect("Failed to create test file");
        file.write_all(json_content.as_bytes())
            .expect("Failed to write to test file");
        assert!(Path::new(&path).exists());

        let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash);
        assert!(res.is_none());

        std::fs::remove_file(&path).expect("Failed to delete test file");
    }

    #[test]
    fn save_load_vrf_key() {
        let pubkey_hash = pubkey_hash_for_seed(vec![0u8; 32]);
        let key = registered_key_with_memo(&pubkey_hash);
        let path = tmp_path("relayer_save_load_vrf_key.json");
        save_activated_vrf_key(&path, &key);

        // Round-trip: the saved key matches our mining key, so it loads.
        let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash);
        assert!(res.is_some());

        std::fs::remove_file(&path).expect("Failed to delete test file");
    }

    #[test]
    fn invalid_saved_memo() {
        let pubkey_hash = pubkey_hash_for_seed(vec![0u8; 32]);
        let key = registered_key_with_memo(&pubkey_hash);
        let path = tmp_path("relayer_invalid_saved_memo.json");
        save_activated_vrf_key(&path, &key);

        // A different seed yields a different pubkey hash, so the saved key's
        // memo no longer matches and the load must be rejected.
        let other_pubkey_hash = pubkey_hash_for_seed(vec![1u8; 32]);

        let res = RelayerThread::load_saved_vrf_key(&path, &other_pubkey_hash);
        assert!(res.is_none());

        std::fs::remove_file(&path).expect("Failed to delete test file");
    }

    #[test]
    fn check_need_block_found() {
        let consensus_hash_byte = thread_rng().gen();
        // Clamp the random height into [1, u64::MAX - 2] so the +1 / -1
        // adjustments below can neither wrap nor saturate into equal heights
        // (a plain wrapping_add(1) could yield 0 at u64::MAX).
        let base_height = thread_rng().gen::<u64>() % (u64::MAX - 2) + 1;
        let canonical_stacks_snapshot = BlockSnapshot {
            block_height: base_height,
            burn_header_timestamp: thread_rng().gen(),
            burn_header_hash: BurnchainHeaderHash([thread_rng().gen(); 32]),
            consensus_hash: ConsensusHash([consensus_hash_byte; 20]),
            parent_burn_header_hash: BurnchainHeaderHash([thread_rng().gen(); 32]),
            ops_hash: OpsHash([thread_rng().gen(); 32]),
            total_burn: thread_rng().gen(),
            sortition: true,
            sortition_hash: SortitionHash([thread_rng().gen(); 32]),
            winning_block_txid: Txid([thread_rng().gen(); 32]),
            winning_stacks_block_hash: BlockHeaderHash([thread_rng().gen(); 32]),
            index_root: TrieHash([thread_rng().gen(); 32]),
            num_sortitions: thread_rng().gen(),
            stacks_block_accepted: true,
            stacks_block_height: thread_rng().gen(),
            arrival_index: thread_rng().gen(),
            canonical_stacks_tip_consensus_hash: ConsensusHash([thread_rng().gen(); 20]),
            canonical_stacks_tip_hash: BlockHeaderHash([thread_rng().gen(); 32]),
            canonical_stacks_tip_height: thread_rng().gen(),
            sortition_id: SortitionId([thread_rng().gen(); 32]),
            parent_sortition_id: SortitionId([thread_rng().gen(); 32]),
            pox_valid: true,
            accumulated_coinbase_ustx: thread_rng().gen::<u64>() as u128,
            miner_pk_hash: Some(Hash160([thread_rng().gen(); 20])),
        };

        // The consensus_hashes are the same, and the block heights are the same. Therefore, don't need a block found.
        let last_winning_block_snapshot = canonical_stacks_snapshot.clone();
        assert!(!RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &last_winning_block_snapshot
        ));

        // The block height of the canonical tip is higher than the last winning snapshot. We already issued a block found.
        let mut canonical_stacks_snapshot_is_higher_than_last_winning_snapshot =
            last_winning_block_snapshot.clone();
        canonical_stacks_snapshot_is_higher_than_last_winning_snapshot.block_height =
            canonical_stacks_snapshot.block_height.saturating_sub(1);
        assert!(!RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &canonical_stacks_snapshot_is_higher_than_last_winning_snapshot
        ));

        // The block height is the same, but we have different consensus hashes. We need to issue a block found.
        // (This case was previously duplicated verbatim; one copy suffices.)
        let mut tip_consensus_hash_mismatch = last_winning_block_snapshot.clone();
        tip_consensus_hash_mismatch.consensus_hash =
            ConsensusHash([consensus_hash_byte.wrapping_add(1); 20]);
        assert!(RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &tip_consensus_hash_mismatch
        ));

        // The block height of the canonical tip is lower than the last winning snapshot blockheight. We need to issue a block found.
        let mut canonical_stacks_snapshot_is_lower_than_last_winning_snapshot =
            last_winning_block_snapshot.clone();
        canonical_stacks_snapshot_is_lower_than_last_winning_snapshot.block_height =
            canonical_stacks_snapshot.block_height.saturating_add(1);
        assert!(RelayerThread::need_block_found(
            &canonical_stacks_snapshot,
            &canonical_stacks_snapshot_is_lower_than_last_winning_snapshot
        ));
    }

    #[test]
    fn burn_block_commit_timer_units() {
        let mut burn_block_timer = BurnBlockCommitTimer::NotSet;
        assert_eq!(burn_block_timer.elapsed_secs(), 0);

        let ch_0 = ConsensusHash([0; 20]);
        let ch_1 = ConsensusHash([1; 20]);
        let ch_2 = ConsensusHash([2; 20]);

        // First observation of a tip arms the timer but is never "ready".
        assert!(!burn_block_timer.is_ready(&ch_0, &Duration::from_secs(1)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_0);

        std::thread::sleep(Duration::from_secs(1));

        // Same tip and a zero delay: enough time has elapsed, so it's ready.
        assert!(burn_block_timer.is_ready(&ch_0, &Duration::from_secs(0)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_0);

        // A new tip resets the timer, so it's not ready even with zero delay.
        assert!(!burn_block_timer.is_ready(&ch_1, &Duration::from_secs(0)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_1);

        // Same tip but an effectively-infinite delay: never ready.
        assert!(!burn_block_timer.is_ready(&ch_1, &Duration::from_secs(u64::MAX)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_1);

        std::thread::sleep(Duration::from_secs(1));
        // Another new tip resets again, regardless of elapsed time.
        assert!(!burn_block_timer.is_ready(&ch_2, &Duration::from_secs(0)));
        let BurnBlockCommitTimer::Set { burn_tip, .. } = &burn_block_timer else {
            panic!("The burn block timer should be set");
        };
        assert_eq!(burn_tip, &ch_2);
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc