• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

stacks-network / stacks-core / 25404138305-1

05 May 2026 09:47PM UTC coverage: 85.69% (-0.02%) from 85.712%
25404138305-1

Pull #7169

github

497ffd
web-flow
Merge 35db1183d into 53ffba0ab
Pull Request #7169: Feat: add defensive memory allocation for miners/signers

134 of 139 new or added lines in 11 files covered. (96.4%)

4591 existing lines in 96 files now uncovered.

187733 of 219085 relevant lines covered (85.69%)

18687545.45 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

94.83
/stacks-common/src/alloc_tracker.rs
1
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
2
// Copyright (C) 2020-2026 Stacks Open Internet Foundation
3
//
4
// This program is free software: you can redistribute it and/or modify
5
// it under the terms of the GNU General Public License as published by
6
// the Free Software Foundation, either version 3 of the License, or
7
// (at your option) any later version.
8
//
9
// This program is distributed in the hope that it will be useful,
10
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
// GNU General Public License for more details.
13
//
14
// You should have received a copy of the GNU General Public License
15
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
16
use std::alloc::{GlobalAlloc, Layout};
17
use std::cell::Cell;
18

19
thread_local! {
    /// Per-thread running totals of bytes allocated and deallocated.
    /// Written by `TrackingAllocator`'s `GlobalAlloc` methods and read via
    /// `thread_allocated()`. The `const` initializer avoids a lazy-init
    /// check on each access.
    static THREAD_ALLOCATIONS: Cell<AllocationCounter> = const { Cell::new(AllocationCounter::ZERO) };
}
22

23
/// Counter for allocated and deallocated bytes
///
/// Keeps two monotonically increasing totals instead of a single signed
/// net value; the net is derived on demand by [`Self::net_allocated`].
#[derive(Clone, Copy)]
pub struct AllocationCounter {
    // Total bytes handed out on this thread.
    allocated: u64,
    // Total bytes returned on this thread.
    deallocated: u64,
}

impl AllocationCounter {
    /// A counter with no recorded allocations or deallocations.
    pub const ZERO: Self = Self {
        allocated: 0,
        deallocated: 0,
    };

    /// Net allocation (allocated - deallocated) over a `baseline`
    ///
    /// All subtractions saturate at zero, so a baseline taken "after"
    /// `self` (or a deallocation total exceeding the allocation total)
    /// yields 0 rather than wrapping.
    pub fn net_allocated(&self, baseline: &AllocationCounter) -> u64 {
        let alloc = self.allocated.saturating_sub(baseline.allocated);
        let dealloc = self.deallocated.saturating_sub(baseline.deallocated);
        alloc.saturating_sub(dealloc)
    }

    /// Return `self` with allocated incremented by `increment`
    ///
    /// Saturating: this runs inside the global allocator, where a
    /// debug-build overflow panic (`+=`) would abort the process. Matches
    /// the saturating arithmetic used by `net_allocated`.
    fn increment_alloc(mut self, increment: u64) -> Self {
        self.allocated = self.allocated.saturating_add(increment);
        self
    }

    /// Return `self` with deallocated incremented by `increment`
    ///
    /// Saturating for the same reason as `increment_alloc`.
    fn increment_dealloc(mut self, increment: u64) -> Self {
        self.deallocated = self.deallocated.saturating_add(increment);
        self
    }
}
55

56
/// Read the allocation counter for the current thread.
57
///
58
/// Returns AllocationCounter::ZERO if the tracking allocator is not installed or if TLS is
59
/// being torn down (thread shutdown).
60
pub fn thread_allocated() -> AllocationCounter {
63,323,985✔
61
    THREAD_ALLOCATIONS
63,323,985✔
62
        .try_with(Cell::get)
63,323,985✔
63
        .unwrap_or(AllocationCounter::ZERO)
63,323,985✔
64
}
63,323,985✔
65

66
/// A `GlobalAlloc` wrapper that counts per-thread allocations and
/// deallocations. Delegates all actual allocation work to the inner
/// allocator `A`.
///
/// Byte counts are recorded in the `THREAD_ALLOCATIONS` thread-local and
/// can be read with [`thread_allocated`].
pub struct TrackingAllocator<A: GlobalAlloc> {
    /// The underlying allocator that performs the real work.
    pub inner: A,
}
73

74
// Each method delegates to `self.inner` first, then updates this thread's
// byte counters only if the underlying operation succeeded. Counter updates
// use `LocalKey::try_with` with the result discarded: if TLS is being torn
// down (thread exit), the bytes simply go uncounted instead of panicking
// inside the allocator.
unsafe impl<A: GlobalAlloc> GlobalAlloc for TrackingAllocator<A> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: `layout` is forwarded unchanged; the caller upholds the
        // `GlobalAlloc::alloc` contract (non-zero size, valid alignment).
        let ptr = unsafe { self.inner.alloc(layout) };
        // Only count bytes for allocations that actually succeeded.
        if !ptr.is_null() {
            let _ = THREAD_ALLOCATIONS.try_with(|c| {
                let next = c.get().increment_alloc(layout.size() as u64);
                c.set(next);
            });
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // SAFETY: caller guarantees `ptr` was allocated by this allocator
        // with this `layout`, per the `GlobalAlloc::dealloc` contract.
        unsafe { self.inner.dealloc(ptr, layout) };
        // Deallocation cannot fail, so the bytes are always recorded.
        let _ = THREAD_ALLOCATIONS.try_with(|c| {
            let next = c.get().increment_dealloc(layout.size() as u64);
            c.set(next);
        });
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // SAFETY: same contract as `alloc`; delegated unchanged so the
        // inner allocator can use an optimized zeroing path.
        let ptr = unsafe { self.inner.alloc_zeroed(layout) };
        if !ptr.is_null() {
            let _ = THREAD_ALLOCATIONS.try_with(|c| {
                let next = c.get().increment_alloc(layout.size() as u64);
                c.set(next);
            });
        }
        ptr
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: caller guarantees `ptr`/`layout` describe a live
        // allocation from this allocator and `new_size` is non-zero.
        let new_ptr = unsafe { self.inner.realloc(ptr, layout, new_size) };
        // Note: if `new_ptr` is null, no deallocation or allocation
        // happened, `ptr` remains valid.
        if !new_ptr.is_null() {
            // A successful realloc is accounted as a dealloc of the old
            // size plus an alloc of the new size, in one counter update.
            let _ = THREAD_ALLOCATIONS.try_with(|c| {
                let next = c
                    .get()
                    .increment_dealloc(layout.size() as u64)
                    .increment_alloc(new_size as u64);
                c.set(next);
            });
        }
        new_ptr
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc