
stacks-network / stacks-core, build 23943169302
Pull Request #7076: feat: sortition side-table copy and validation
Commit 7f2377 (web-flow, via github): Merge bb87ecec2 into c529ad924
03 Apr 2026 10:28AM UTC. Coverage: 77.573% (-8.1%) from 85.712%.
3743 of 4318 new or added lines in 19 files covered (86.68%). 19304 existing lines in 182 files now uncovered. 172097 of 221852 relevant lines covered (77.57%). 7722182.76 hits per line.

Source file: /stacks-node/src/globals.rs

use std::collections::{BTreeMap, HashMap};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::SyncSender;
use std::sync::{Arc, Mutex};

use stacks::burnchains::Txid;
use stacks::chainstate::burn::operations::LeaderKeyRegisterOp;
use stacks::chainstate::burn::BlockSnapshot;
use stacks::chainstate::coordinator::comm::CoordinatorChannels;
use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap;
use stacks::chainstate::stacks::db::StacksChainState;
use stacks::chainstate::stacks::miner::MinerStatus;
use stacks::config::{BurnchainConfig, MinerConfig};
use stacks::net::NetworkResult;
use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash};

use crate::neon::Counters;
use crate::neon_node::LeaderKeyRegistrationState;
use crate::run_loop::RegisteredKey;
use crate::syncctl::PoxSyncWatchdogComms;
use crate::TipCandidate;

pub type NeonGlobals = Globals<RelayerDirective>;

/// Command types for the relayer thread, issued to it by other threads
#[allow(clippy::large_enum_variant)]
pub enum RelayerDirective {
    /// Handle some new data that arrived on the network (such as blocks, transactions, and
    /// microblocks)
    HandleNetResult(NetworkResult),
    /// Announce a new sortition.  Process and broadcast the block if we won.
    ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash),
    /// Try to mine a block
    RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms)
    /// A Nakamoto tenure's first block has been processed.
    NakamotoTenureStartProcessed(ConsensusHash, BlockHeaderHash),
    /// Try to register a VRF public key
    RegisterKey(BlockSnapshot),
    /// Stop the relayer thread
    Exit,
}
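
// Illustrative sketch, not part of the original file: a caller holding a clone of
// the globals hands work to the relayer thread through the bounded channel behind
// `relay_send`. `send` blocks while the channel is full, and an `Err` means the
// relayer thread has already exited and dropped its receiver.
#[allow(dead_code)]
fn example_request_exit(globals: &NeonGlobals) {
    if globals.relay_send.send(RelayerDirective::Exit).is_err() {
        error!("Relayer thread has exited; Exit directive was not delivered");
    }
}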

/// Inter-thread communication structure, shared between threads. This
/// is generic over the relayer communication channel: Nakamoto and
/// Neon nodes use different relayer directives.
pub struct Globals<T> {
    /// Last sortition processed
    last_sortition: Arc<Mutex<Option<BlockSnapshot>>>,
    /// Status of the miner
    miner_status: Arc<Mutex<MinerStatus>>,
    /// Communication link to the coordinator thread
    pub(crate) coord_comms: CoordinatorChannels,
    /// Unconfirmed transactions (shared between the relayer and p2p threads)
    unconfirmed_txs: Arc<Mutex<UnconfirmedTxMap>>,
    /// Writer endpoint to the relayer thread
    pub relay_send: SyncSender<T>,
    /// Counter state in the main thread
    pub counters: Counters,
    /// Connection to the PoX sync watchdog
    pub sync_comms: PoxSyncWatchdogComms,
    /// Global flag to see if we should keep running
    pub should_keep_running: Arc<AtomicBool>,
    /// Status of our VRF key registration state (shared between the main thread and the relayer)
    pub leader_key_registration_state: Arc<Mutex<LeaderKeyRegistrationState>>,
    /// Last miner config loaded
    last_miner_config: Arc<Mutex<Option<MinerConfig>>>,
    /// Last burnchain config
    last_burnchain_config: Arc<Mutex<Option<BurnchainConfig>>>,
    /// Last miner spend amount
    last_miner_spend_amount: Arc<Mutex<Option<u64>>>,
    /// Burnchain height at which we start mining
    start_mining_height: Arc<Mutex<u64>>,
    /// Estimated winning probability at given Bitcoin block heights
    estimated_winning_probs: Arc<Mutex<HashMap<u64, f64>>>,
    /// Previously-selected best tips.
    /// Maps Stacks height to tip candidate.
    previous_best_tips: Arc<Mutex<BTreeMap<u64, TipCandidate>>>,
    /// Initiative flag.
    /// Raised when the main loop should wake up and do something.
    initiative: Arc<Mutex<Option<String>>>,
}
82
// Need to manually implement Clone, because [derive(Clone)] requires
83
//  all trait bounds to implement Clone, even though T doesn't need Clone
84
//  because it's behind SyncSender.
85
impl<T> Clone for Globals<T> {
UNCOV
86
    fn clone(&self) -> Self {
×
UNCOV
87
        Self {
×
UNCOV
88
            last_sortition: self.last_sortition.clone(),
×
UNCOV
89
            miner_status: self.miner_status.clone(),
×
UNCOV
90
            coord_comms: self.coord_comms.clone(),
×
UNCOV
91
            unconfirmed_txs: self.unconfirmed_txs.clone(),
×
UNCOV
92
            relay_send: self.relay_send.clone(),
×
UNCOV
93
            counters: self.counters.clone(),
×
UNCOV
94
            sync_comms: self.sync_comms.clone(),
×
UNCOV
95
            should_keep_running: self.should_keep_running.clone(),
×
UNCOV
96
            leader_key_registration_state: self.leader_key_registration_state.clone(),
×
UNCOV
97
            last_miner_config: self.last_miner_config.clone(),
×
UNCOV
98
            last_burnchain_config: self.last_burnchain_config.clone(),
×
UNCOV
99
            last_miner_spend_amount: self.last_miner_spend_amount.clone(),
×
UNCOV
100
            start_mining_height: self.start_mining_height.clone(),
×
UNCOV
101
            estimated_winning_probs: self.estimated_winning_probs.clone(),
×
UNCOV
102
            previous_best_tips: self.previous_best_tips.clone(),
×
UNCOV
103
            initiative: self.initiative.clone(),
×
UNCOV
104
        }
×
UNCOV
105
    }
×
106
}
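
// Illustrative sketch, not part of the original file: because every shared field
// sits behind an Arc, cloning Globals is cheap (it only bumps reference counts),
// and every clone observes the same underlying state. A stop flag raised on one
// clone is seen by all of them; this assumes the usual case where the contained
// types are Send.
#[allow(dead_code)]
fn example_clone_across_threads(globals: &NeonGlobals) {
    let thread_globals = globals.clone();
    std::thread::spawn(move || {
        thread_globals.signal_stop(); // visible to every other clone
    });
}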

impl<T> Globals<T> {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        coord_comms: CoordinatorChannels,
        miner_status: Arc<Mutex<MinerStatus>>,
        relay_send: SyncSender<T>,
        counters: Counters,
        sync_comms: PoxSyncWatchdogComms,
        should_keep_running: Arc<AtomicBool>,
        start_mining_height: u64,
        leader_key_registration_state: LeaderKeyRegistrationState,
    ) -> Globals<T> {
        Globals {
            last_sortition: Arc::new(Mutex::new(None)),
            miner_status,
            coord_comms,
            unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())),
            relay_send,
            counters,
            sync_comms,
            should_keep_running,
            leader_key_registration_state: Arc::new(Mutex::new(leader_key_registration_state)),
            last_miner_config: Arc::new(Mutex::new(None)),
            last_burnchain_config: Arc::new(Mutex::new(None)),
            last_miner_spend_amount: Arc::new(Mutex::new(None)),
            start_mining_height: Arc::new(Mutex::new(start_mining_height)),
            estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())),
            previous_best_tips: Arc::new(Mutex::new(BTreeMap::new())),
            initiative: Arc::new(Mutex::new(None)),
        }
    }

    /// Does the inventory sync watcher think we still need to
    /// catch up to the chain tip?
    pub fn in_initial_block_download(&self) -> bool {
        self.sync_comms.get_ibd()
    }

    /// Get the last sortition processed by the relayer thread
    pub fn get_last_sortition(&self) -> Option<BlockSnapshot> {
        self.last_sortition
            .lock()
            .unwrap_or_else(|_| {
                error!("Sortition mutex poisoned!");
                panic!();
            })
            .clone()
    }

    /// Set the last sortition processed
    pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) {
        let mut last_sortition = self.last_sortition.lock().unwrap_or_else(|_| {
            error!("Sortition mutex poisoned!");
            panic!();
        });
        last_sortition.replace(block_snapshot);
    }
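
    /// Illustrative sketch, not part of the original file: the
    /// `lock().unwrap_or_else(...)` pattern above is deliberate. A Mutex is only
    /// poisoned when another thread panicked while holding it, so the accessor
    /// logs a fatal error and crashes rather than proceeding on possibly
    /// inconsistent state. The same pattern over an assumed `Mutex<u64>`:
    #[allow(dead_code)]
    fn example_locked_read(shared: &Mutex<u64>) -> u64 {
        *shared.lock().unwrap_or_else(|_| {
            error!("FATAL: shared mutex poisoned");
            panic!();
        })
    }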

    /// Get the status of the miner (blocked or ready)
    pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> {
        self.miner_status.clone()
    }

    /// Mark the miner as blocked
    pub fn block_miner(&self) {
        self.miner_status
            .lock()
            .expect("FATAL: mutex poisoned")
            .add_blocked()
    }

    /// Mark the miner as no longer blocked
    pub fn unblock_miner(&self) {
        self.miner_status
            .lock()
            .expect("FATAL: mutex poisoned")
            .remove_blocked()
    }

    /// Get the main thread's counters
    pub fn get_counters(&self) -> Counters {
        self.counters.clone()
    }

    /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't
    /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent.
    /// Clears the unconfirmed transactions, and replaces them with the chainstate's.
    pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) {
        let Some(ref unconfirmed) = chainstate.unconfirmed_state else {
            return;
        };
        let mut txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| {
            // can only happen due to a thread panic in the relayer
            error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}");
            panic!();
        });
        txs.clear();
        txs.extend(unconfirmed.mined_txs.clone());
    }

    /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer.
    /// Copies the shared unconfirmed transactions into the chainstate.
    pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) {
        let Some(ref mut unconfirmed) = chainstate.unconfirmed_state else {
            return;
        };
        let txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| {
            // can only happen due to a thread panic in the relayer
            error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}");
            panic!();
        });
        unconfirmed.mined_txs.clear();
        unconfirmed.mined_txs.extend(txs.clone());
    }
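
    /// Illustrative sketch, not part of the original file: the relayer publishes
    /// its freshly-built unconfirmed-tx map, and the p2p thread later copies that
    /// same map into its own chainstate handle, skipping the disk I/O of
    /// rebuilding the unconfirmed state trie.
    #[allow(dead_code)]
    fn example_unconfirmed_tx_handoff(
        &self,
        relayer_chainstate: &StacksChainState,
        p2p_chainstate: &mut StacksChainState,
    ) {
        self.send_unconfirmed_txs(relayer_chainstate); // called on the relayer thread
        self.recv_unconfirmed_txs(p2p_chainstate); // called on the p2p thread
    }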

    /// Signal system-wide stop
    #[cfg_attr(test, mutants::skip)]
    pub fn signal_stop(&self) {
        self.should_keep_running.store(false, Ordering::SeqCst);
    }

    /// Should we keep running?
    #[cfg_attr(test, mutants::skip)]
    pub fn keep_running(&self) -> bool {
        self.should_keep_running.load(Ordering::SeqCst)
    }
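
    /// Illustrative sketch, not part of the original file: a typical worker loop
    /// gates each iteration on `keep_running`, so a single `signal_stop` from any
    /// thread winds every such loop down after its current iteration.
    #[allow(dead_code)]
    fn example_worker_loop(&self) {
        while self.keep_running() {
            // ... one unit of work per iteration ...
            std::thread::sleep(std::time::Duration::from_millis(100));
        }
        info!("Shutdown signalled; worker loop exiting");
    }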

    /// Get the handle to the coordinator
    pub fn coord(&self) -> &CoordinatorChannels {
        &self.coord_comms
    }

    /// Get the current leader key registration state.
    /// Called from the runloop thread and relayer thread.
    pub fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState {
        let key_state = self
            .leader_key_registration_state
            .lock()
            .unwrap_or_else(|e| {
                // can only happen due to a thread panic in the relayer
                error!("FATAL: leader key registration mutex is poisoned: {e:?}");
                panic!();
            });
        key_state.clone()
    }

    /// Set the initial leader key registration state.
    /// Called from the runloop thread when booting up.
    pub fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) {
        let mut key_state = self
            .leader_key_registration_state
            .lock()
            .unwrap_or_else(|e| {
                // can only happen due to a thread panic in the relayer
                error!("FATAL: leader key registration mutex is poisoned: {e:?}");
                panic!();
            });
        *key_state = new_state;
    }

    /// Advance the leader key registration state to pending, given a txid we just sent.
    /// Only the relayer thread calls this.
    pub fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) {
        let mut key_state = self
            .leader_key_registration_state
            .lock()
            .unwrap_or_else(|_e| {
                error!("FATAL: failed to lock leader key registration state mutex");
                panic!();
            });
        *key_state = LeaderKeyRegistrationState::Pending(target_block_height, txid);
    }

    /// Advance the leader key registration state to active, given the VRF key registration ops
    /// we've discovered in a given snapshot.
    /// The runloop thread calls this whenever it processes a sortition.
    pub fn try_activate_leader_key_registration(
        &self,
        burn_block_height: u64,
        key_registers: Vec<LeaderKeyRegisterOp>,
    ) -> Option<RegisteredKey> {
        let mut activated_key = None;
        match self.leader_key_registration_state.lock() {
            Ok(ref mut leader_key_registration_state) => {
                for op in key_registers.into_iter() {
                    if let LeaderKeyRegistrationState::Pending(target_block_height, txid) =
                        leader_key_registration_state.clone()
                    {
                        info!(
                            "Received burnchain block #{burn_block_height} including key_register_op - {txid}"
                        );
                        if txid == op.txid {
                            let active_key = RegisteredKey {
                                target_block_height,
                                vrf_public_key: op.public_key,
                                block_height: op.block_height,
                                op_vtxindex: op.vtxindex,
                                memo: op.memo,
                            };

                            **leader_key_registration_state =
                                LeaderKeyRegistrationState::Active(active_key.clone());

                            activated_key = Some(active_key);
                        } else {
                            debug!(
                                "key_register_op {txid} does not match our pending op {}",
                                &op.txid
                            );
                        }
                    }
                }
            }
            Err(_e) => {
                error!("FATAL: failed to lock leader key registration state mutex");
                panic!();
            }
        }
        activated_key
    }
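
    /// Illustrative sketch, not part of the original file: the registration
    /// lifecycle as the relayer and runloop threads drive it. The relayer records
    /// its VRF key-register transaction as Pending; when the runloop later finds
    /// that txid among a sortition's key-register ops, the state advances to
    /// Active. The heights 1000 and 1005 are made-up values.
    #[allow(dead_code)]
    fn example_key_lifecycle(&self, txid: Txid, key_registers: Vec<LeaderKeyRegisterOp>) {
        self.set_pending_leader_key_registration(1000, txid); // relayer thread
        // ... later, as the runloop processes the burn block at height 1005 ...
        if self
            .try_activate_leader_key_registration(1005, key_registers)
            .is_some()
        {
            info!("VRF key registration is now active");
        }
    }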

    /// Directly set the leader key activation state from a saved key
    pub fn resume_leader_key(&self, registered_key: RegisteredKey) {
        match self.leader_key_registration_state.lock() {
            Ok(ref mut leader_key_registration_state) => {
                **leader_key_registration_state = LeaderKeyRegistrationState::Active(registered_key)
            }
            Err(_e) => {
                error!("FATAL: failed to lock leader key registration state mutex");
                panic!();
            }
        }
    }

    /// Get the last miner config loaded
    pub fn get_last_miner_config(&self) -> Option<MinerConfig> {
        match self.last_miner_config.lock() {
            Ok(last_miner_config) => (*last_miner_config).clone(),
            Err(_e) => {
                error!("FATAL: failed to lock last miner config");
                panic!();
            }
        }
    }

    /// Set the last miner config loaded
    pub fn set_last_miner_config(&self, miner_config: MinerConfig) {
        match self.last_miner_config.lock() {
            Ok(ref mut last_miner_config) => **last_miner_config = Some(miner_config),
            Err(_e) => {
                error!("FATAL: failed to lock last miner config");
                panic!();
            }
        }
    }

    /// Get the last burnchain config
    pub fn get_last_burnchain_config(&self) -> Option<BurnchainConfig> {
        match self.last_burnchain_config.lock() {
            Ok(last_burnchain_config) => (*last_burnchain_config).clone(),
            Err(_e) => {
                error!("FATAL: failed to lock last burnchain config");
                panic!();
            }
        }
    }

    /// Set the last burnchain config
    pub fn set_last_burnchain_config(&self, burnchain_config: BurnchainConfig) {
        match self.last_burnchain_config.lock() {
            Ok(ref mut last_burnchain_config) => **last_burnchain_config = Some(burnchain_config),
            Err(_e) => {
                error!("FATAL: failed to lock last burnchain config");
                panic!();
            }
        }
    }

    /// Get the last miner spend amount
    pub fn get_last_miner_spend_amount(&self) -> Option<u64> {
        match self.last_miner_spend_amount.lock() {
            Ok(last_miner_spend_amount) => *last_miner_spend_amount,
            Err(_e) => {
                error!("FATAL: failed to lock last miner spend amount");
                panic!();
            }
        }
    }

    /// Set the last miner spend amount
    pub fn set_last_miner_spend_amount(&self, spend_amount: u64) {
        match self.last_miner_spend_amount.lock() {
            Ok(ref mut last_miner_spend_amount) => **last_miner_spend_amount = Some(spend_amount),
            Err(_e) => {
                error!("FATAL: failed to lock last miner spend amount");
                panic!();
            }
        }
    }

    /// Get the height at which we should start mining
    pub fn get_start_mining_height(&self) -> u64 {
        match self.start_mining_height.lock() {
            Ok(ht) => *ht,
            Err(_e) => {
                error!("FATAL: failed to lock start_mining_height");
                panic!();
            }
        }
    }

    /// Set the height at which we started mining.
    /// Only takes effect if the current start mining height is 0.
    pub fn set_start_mining_height_if_zero(&self, value: u64) {
        match self.start_mining_height.lock() {
            Ok(ref mut ht) => {
                if **ht == 0 {
                    **ht = value;
                }
            }
            Err(_e) => {
                error!("FATAL: failed to lock start_mining_height");
                panic!();
            }
        }
    }

    /// Record an estimated winning probability
    pub fn add_estimated_win_prob(&self, burn_height: u64, win_prob: f64) {
        match self.estimated_winning_probs.lock() {
            Ok(mut probs) => {
                probs.insert(burn_height, win_prob);
            }
            Err(_e) => {
                error!("FATAL: failed to lock estimated_winning_probs");
                panic!();
            }
        }
    }

    /// Get the estimated winning probability, if we have one
    pub fn get_estimated_win_prob(&self, burn_height: u64) -> Option<f64> {
        match self.estimated_winning_probs.lock() {
            Ok(probs) => probs.get(&burn_height).cloned(),
            Err(_e) => {
                error!("FATAL: failed to lock estimated_winning_probs");
                panic!();
            }
        }
    }

    /// Record a best-tip
    pub fn add_best_tip(&self, stacks_height: u64, tip_candidate: TipCandidate, max_depth: u64) {
        match self.previous_best_tips.lock() {
            Ok(mut tips) => {
                tips.insert(stacks_height, tip_candidate);
                let mut stale = vec![];
                for (prev_height, _) in tips.iter() {
                    if *prev_height + max_depth < stacks_height {
                        stale.push(*prev_height);
                    }
                }
                for height in stale.into_iter() {
                    tips.remove(&height);
                }
            }
            Err(_e) => {
                error!("FATAL: failed to lock previous_best_tips");
                panic!();
            }
        }
    }

    /// Get a best-tip at a previous height
    pub fn get_best_tip(&self, stacks_height: u64) -> Option<TipCandidate> {
        match self.previous_best_tips.lock() {
            Ok(tips) => tips.get(&stacks_height).cloned(),
            Err(_e) => {
                error!("FATAL: failed to lock previous_best_tips");
                panic!();
            }
        }
    }
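
    /// Illustrative sketch, not part of the original file, with made-up heights:
    /// with `max_depth = 5`, recording a tip at height 100 prunes every remembered
    /// tip at a height h with h + 5 < 100, so heights 94 and below are dropped
    /// while height 95 survives.
    #[allow(dead_code)]
    fn example_tip_pruning(&self, tip: TipCandidate) {
        self.add_best_tip(100, tip, 5);
        assert!(self.get_best_tip(94).is_none()); // any tip at height 94 was pruned
    }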

    /// Raise the initiative flag
    pub fn raise_initiative(&self, raiser: String) {
        match self.initiative.lock() {
            Ok(mut initiative) => {
                *initiative = Some(raiser);
            }
            Err(_e) => {
                error!("FATAL: failed to lock initiative");
                panic!();
            }
        }
    }

    /// Clear the initiative flag and return its value
    pub fn take_initiative(&self) -> Option<String> {
        match self.initiative.lock() {
            Ok(mut initiative) => (*initiative).take(),
            Err(_e) => {
                error!("FATAL: failed to lock initiative");
                panic!();
            }
        }
    }
}
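
// Illustrative sketch, not part of the original file: `raise_initiative` and
// `take_initiative` form a one-slot mailbox. Any thread can nudge the main loop
// awake and leave its name; the main loop drains the slot, so repeated raises
// before the next poll collapse into a single wake-up.
#[allow(dead_code)]
fn example_initiative_roundtrip(globals: &NeonGlobals) {
    globals.raise_initiative("relayer".to_string());
    if let Some(raiser) = globals.take_initiative() {
        debug!("Main loop woken by {raiser}");
    }
    assert!(globals.take_initiative().is_none()); // the slot was drained
}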