tudasc / TypeART · build 19336558931

13 Nov 2025 03:24PM UTC · coverage: 90.337% (+1.4%) from 88.924%
Push build via GitHub (committer: web-flow)
Commit: Global variables as type descriptors (#173)

597 of 629 new or added lines in 25 files covered (94.91%).
2 existing lines in 2 files now uncovered.
4768 of 5278 relevant lines covered (90.34%).
208224.38 hits per line.

Source file: /lib/runtime/AllocationTracking.cpp · 91.62% covered
// TypeART library
//
// Copyright (c) 2017-2025 TypeART Authors
// Distributed under the BSD 3-Clause license.
// (See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/BSD-3-Clause)
//
// Project home: https://github.com/tudasc/TypeART
//
// SPDX-License-Identifier: BSD-3-Clause
//

#include "AllocationTracking.h"

#include "AccessCounter.h"
#include "CallbackInterface.h"
#include "Runtime.h"
#include "RuntimeData.h"
#include "TypeDB.h"
#include "support/Logger.h"

#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <cassert>
#include <iterator>
#include <type_traits>
#include <vector>

#ifdef TYPEART_BTREE
using namespace btree;
#endif

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

#define CONCAT_(x, y) x##y
#define CONCAT(x, y)  CONCAT_(x, y)
#define GUARDNAME     CONCAT(typeart_guard_, __LINE__)

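// Guard for all runtime entry points: constructs a scoped RTGuard and bails
// out of the enclosing callback early when tracking is off (e.g. during
// re-entrant calls made while the runtime itself is running).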
#define TYPEART_RUNTIME_GUARD     \
  typeart::RTGuard GUARDNAME;     \
  if (!GUARDNAME.shouldTrack()) { \
    return;                       \
  }

namespace typeart {

namespace detail {
template <class...>
constexpr std::false_type always_false{};
}  // namespace detail

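// AllocState and FreeState are bit-flag enums; the operator templates below
// are restricted to them, and any other instantiation fails to compile via
// the dependent static_assert on detail::always_false. Note that operator==
// is a mask test (it returns the overlapping bits), not strict equality.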
template <typename Enum>
inline Enum operator|(Enum lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && (std::is_same_v<Enum, AllocState> || std::is_same_v<Enum, FreeState>)) {
    using enum_t = typename std::underlying_type<Enum>::type;
    return static_cast<Enum>(static_cast<enum_t>(lhs) | static_cast<enum_t>(rhs));
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

template <typename Enum>
inline void operator|=(Enum& lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && (std::is_same_v<Enum, AllocState> || std::is_same_v<Enum, FreeState>)) {
    lhs = lhs | rhs;
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

template <typename Enum>
inline Enum operator&(Enum lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && std::is_same_v<Enum, AllocState>) {
    using enum_t = typename std::underlying_type<Enum>::type;
    return static_cast<Enum>(static_cast<enum_t>(lhs) & static_cast<enum_t>(rhs));
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

template <typename Enum>
inline typename std::underlying_type<Enum>::type operator==(Enum lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && std::is_same_v<Enum, AllocState>) {
    using enum_t = typename std::underlying_type<Enum>::type;
    return static_cast<enum_t>(lhs) & static_cast<enum_t>(rhs);
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

using namespace debug;

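// Per-thread shadow stack of live stack-allocation addresses; entries are
// pushed in onAllocStack() and popped in batches in onLeaveScope().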
namespace {
struct ThreadData final {
  RuntimeT::Stack stackVars;

  ThreadData() {
    stackVars.reserve(RuntimeT::StackReserve);
  }
};

thread_local ThreadData threadData;

}  // namespace

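// The tracker mirrors every instrumented allocation in the runtime's pointer
// map ('wrapper') and updates the soft counters of the Recorder. Heap, stack,
// and global allocations all funnel through doAlloc().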
AllocationTracker::AllocationTracker(const TypeDB& db, Recorder& recorder) : typeDB{db}, runtime_recorder{recorder} {
}

void AllocationTracker::onAlloc(const void* addr, int typeId, size_t count, const void* retAddr) {
  const auto status = doAlloc(addr, typeId, count, retAddr);
  if (status != AllocState::ADDR_SKIPPED) {
    runtime_recorder.incHeapAlloc(typeId, count);
  }
  LOG_TRACE("Alloc " << toString(addr, typeId, count, retAddr, true) << " " << 'H');
}

void AllocationTracker::onAllocStack(const void* addr, int typeId, size_t count, const void* retAddr) {
  const auto status = doAlloc(addr, typeId, count, retAddr);
  if (status != AllocState::ADDR_SKIPPED) {
    threadData.stackVars.push_back(addr);
    runtime_recorder.incStackAlloc(typeId, count);
  }
  LOG_TRACE("Alloc " << toString(addr, typeId, count, retAddr) << " " << 'S');
}

void AllocationTracker::onAllocGlobal(const void* addr, int typeId, size_t count, const void* retAddr) {
  const auto status = doAlloc(addr, typeId, count, retAddr);
  if (status != AllocState::ADDR_SKIPPED) {
    runtime_recorder.incGlobalAlloc(typeId, count);
  }
  LOG_TRACE("Alloc " << toString(addr, typeId, count, retAddr) << " " << 'G');
}

AllocState AllocationTracker::doAlloc(const void* addr, int typeId, size_t count, const void* retAddr) {
  AllocState status = AllocState::NO_INIT;
  if (unlikely(!typeDB.isValid(typeId))) {
    status |= AllocState::UNKNOWN_ID;
    LOG_ERROR("Allocation of unknown type " << toString(addr, typeId, count, retAddr));
  }

  // Calling malloc with size 0 may return a nullptr or some address that cannot be written to.
  // In the second case, the allocation is tracked anyway so that onFree() does not report an error.
  // On the other hand, an allocation at address 0x0 with size > 0 is an actual error.
  if (unlikely(count == 0)) {
    runtime_recorder.incZeroLengthAddr();
    status |= AllocState::ZERO_COUNT;
    LOG_WARNING("Zero-size allocation " << toString(addr, typeId, count, retAddr));
    if (addr == nullptr) {
      runtime_recorder.incZeroLengthAndNullAddr();
      LOG_ERROR("Zero-size and nullptr allocation " << toString(addr, typeId, count, retAddr));
      return status | AllocState::NULL_ZERO | AllocState::ADDR_SKIPPED;
    }
  } else if (unlikely(addr == nullptr)) {
    runtime_recorder.incNullAddr();
    LOG_ERROR("Nullptr allocation " << toString(addr, typeId, count, retAddr));
    return status | AllocState::NULL_PTR | AllocState::ADDR_SKIPPED;
  }

  const auto overridden = wrapper.put(addr, PointerInfo{typeId, count, retAddr});

  if (unlikely(overridden)) {
    runtime_recorder.incAddrReuse();
    status |= AllocState::ADDR_REUSE;
    LOG_WARNING("Pointer already in map " << toString(addr, typeId, count, retAddr));
    // LOG_WARNING("Overridden data in map " << toString(addr, def));
  }

  return status | AllocState::OK;
}

FreeState AllocationTracker::doFreeHeap(const void* addr, const void* retAddr) {
  if (unlikely(addr == nullptr)) {
    LOG_ERROR("Free on nullptr " << "(" << retAddr << ")");
    return FreeState::ADDR_SKIPPED | FreeState::NULL_PTR;
  }

  std::optional<PointerInfo> removed = wrapper.remove(addr);

  if (unlikely(!removed)) {
    LOG_ERROR("Free on unregistered address " << addr << " (" << retAddr << ")");
    return FreeState::ADDR_SKIPPED | FreeState::UNREG_ADDR;
  }

  LOG_TRACE("Free " << toString(addr, *removed));
  if constexpr (!std::is_same_v<Recorder, softcounter::NoneRecorder>) {
    runtime_recorder.incHeapFree(removed->typeId, removed->count);
  }
  return FreeState::OK;
}

void AllocationTracker::onFreeHeap(const void* addr, const void* retAddr) {
  const auto status = doFreeHeap(addr, retAddr);
  if (FreeState::OK == status) {
    runtime_recorder.decHeapAlloc();
  }
}

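// Scope exit: drop the last alloca_count addresses from this thread's shadow
// stack and remove their entries from the pointer map in a single pass.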
void AllocationTracker::onLeaveScope(int alloca_count, const void* retAddr) {
  if (unlikely(alloca_count > static_cast<int>(threadData.stackVars.size()))) {
    LOG_ERROR("Stack is smaller than requested de-allocation count. alloca_count: " << alloca_count << ". size: "
                                                                                    << threadData.stackVars.size());
    alloca_count = threadData.stackVars.size();
  }

  const auto cend      = threadData.stackVars.cend();
  const auto start_pos = (cend - alloca_count);
  LOG_TRACE("Freeing stack (" << alloca_count << ")  " << std::distance(start_pos, threadData.stackVars.cend()))

  wrapper.remove_range(start_pos, cend, [&](std::optional<PointerInfo>& removed, MemAddr addr) {
    if (unlikely(!removed)) {
      LOG_ERROR("Free on unregistered address " << addr << " (" << retAddr << ")");
    } else {
      LOG_TRACE("Free " << toString(addr, *removed));
      if constexpr (!std::is_same_v<Recorder, softcounter::NoneRecorder>) {
        runtime_recorder.incStackFree(removed->typeId, removed->count);
      }
    }
  });

  threadData.stackVars.erase(start_pos, cend);
  runtime_recorder.decStackAlloc(alloca_count);
  LOG_TRACE("Stack after free: " << threadData.stackVars.size());
}

// Base address
std::optional<RuntimeT::MapEntry> AllocationTracker::findBaseAlloc(const void* addr) {
  return wrapper.find(addr);
}

}  // namespace typeart

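// C interface: the callbacks emitted by the TypeART instrumentation pass.
// Each one captures its return address so diagnostics can name the call site,
// then forwards to the AllocationTracker of the runtime singleton.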
void __typeart_alloc(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onAlloc(addr, typeId, count, retAddr);
}

void __typeart_alloc_stack(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onAllocStack(addr, typeId, count, retAddr);
}

void __typeart_alloc_global(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onAllocGlobal(addr, typeId, count, retAddr);
}

void __typeart_free(const void* addr) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onFreeHeap(addr, retAddr);
}

void __typeart_leave_scope(int alloca_count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onLeaveScope(alloca_count, retAddr);
}

void __typeart_alloc_omp(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  auto& rt            = typeart::RuntimeSystem::get();
  rt.allocation_tracker().onAlloc(addr, typeId, count, retAddr);
  rt.recorder.incOmpContextHeap();
}

void __typeart_alloc_stack_omp(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  auto& rt            = typeart::RuntimeSystem::get();
  rt.allocation_tracker().onAllocStack(addr, typeId, count, retAddr);
  rt.recorder.incOmpContextStack();
}

void __typeart_free_omp(const void* addr) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  auto& rt            = typeart::RuntimeSystem::get();
  rt.allocation_tracker().onFreeHeap(addr, retAddr);
  rt.recorder.incOmpContextFree();
}

void __typeart_leave_scope_omp(int alloca_count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onLeaveScope(alloca_count, retAddr);
}

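// "_mty" variants (new in #173, "Global variables as type descriptors"): the
// instrumentation passes a pointer to a global type descriptor instead of an
// integer type ID; the ID is read from the descriptor and, in debug builds,
// cross-checked against the type translator.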
void __typeart_alloc_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAlloc(addr, type_id, count, retAddr);
}

void __typeart_alloc_stack_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAllocStack(addr, type_id, count, retAddr);
}

void __typeart_alloc_global_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAllocGlobal(addr, type_id, count, retAddr);
}

void __typeart_alloc_omp_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAlloc(addr, type_id, count, retAddr);
}

void __typeart_alloc_stack_omp_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAllocStack(addr, type_id, count, retAddr);
}