
tudasc / TypeART, build 20399417387
20 Dec 2025 08:07PM UTC, coverage: 90.245% (first build)
Pull Request #182: Linking fixes (github, web-flow)
Merge d45b8fb56 into af3caee32
0 of 2 new or added lines in 1 file covered (0.0%)
4857 of 5382 relevant lines covered (90.25%)
33556.09 hits per line

Source file: /lib/runtime/AllocationTracking.cpp (91.62% covered)
// TypeART library
//
// Copyright (c) 2017-2025 TypeART Authors
// Distributed under the BSD 3-Clause license.
// (See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/BSD-3-Clause)
//
// Project home: https://github.com/tudasc/TypeART
//
// SPDX-License-Identifier: BSD-3-Clause
//

#include "AllocationTracking.h"

#include "AccessCounter.h"
#include "CallbackInterface.h"
#include "Runtime.h"
#include "RuntimeData.h"
#include "TypeDB.h"
#include "support/Logger.h"

#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <cassert>
#include <iterator>
#include <optional>  // std::optional is used directly below
#include <type_traits>
#include <vector>

#ifdef TYPEART_BTREE
using namespace btree;
#endif

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

#define CONCAT_(x, y) x##y
#define CONCAT(x, y)  CONCAT_(x, y)
#define GUARDNAME     CONCAT(typeart_guard_, __LINE__)

#define TYPEART_RUNTIME_GUARD     \
  typeart::RTGuard GUARDNAME;     \
  if (!GUARDNAME.shouldTrack()) { \
    return;                       \
  }
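
// Illustration (not part of the original file): a use of TYPEART_RUNTIME_GUARD
// on source line N expands to
//
//   typeart::RTGuard typeart_guard_N;   // N = __LINE__ at the use site
//   if (!typeart_guard_N.shouldTrack()) {
//     return;
//   }
//
// Concatenating __LINE__ into the variable name keeps each expansion's guard
// object uniquely named.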

namespace typeart {

namespace detail {
template <class...>
constexpr std::false_type always_false{};
}  // namespace detail

template <typename Enum>
inline Enum operator|(Enum lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && (std::is_same_v<Enum, AllocState> || std::is_same_v<Enum, FreeState>)) {
    using enum_t = typename std::underlying_type<Enum>::type;
    return static_cast<Enum>(static_cast<enum_t>(lhs) | static_cast<enum_t>(rhs));
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

template <typename Enum>
inline void operator|=(Enum& lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && (std::is_same_v<Enum, AllocState> || std::is_same_v<Enum, FreeState>)) {
    lhs = lhs | rhs;
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

template <typename Enum>
inline Enum operator&(Enum lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && std::is_same_v<Enum, AllocState>) {
    using enum_t = typename std::underlying_type<Enum>::type;
    return static_cast<Enum>(static_cast<enum_t>(lhs) & static_cast<enum_t>(rhs));
  } else {
    static_assert(detail::always_false<Enum>);
  }
}

// Note: despite its name, this overload returns the bitwise intersection of
// the two flag sets (non-zero if any flag overlaps), not a strict equality.
template <typename Enum>
inline typename std::underlying_type<Enum>::type operator==(Enum lhs, Enum rhs) {
  if constexpr (std::is_enum_v<Enum> && std::is_same_v<Enum, AllocState>) {
    using enum_t = typename std::underlying_type<Enum>::type;
    return static_cast<enum_t>(lhs) & static_cast<enum_t>(rhs);
  } else {
    static_assert(detail::always_false<Enum>);
  }
}
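
// Sketch (not part of the original file): assuming the AllocState enumerators
// are distinct bit flags, the overloads above are used like so:
//
//   AllocState s = AllocState::NO_INIT;
//   s |= AllocState::ZERO_COUNT;                    // operator|= sets a flag
//   s = s | AllocState::ADDR_SKIPPED;               // operator| combines flags
//   AllocState m = s & AllocState::ADDR_SKIPPED;    // operator& masks flags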

using namespace debug;

namespace {
struct ThreadData final {
  RuntimeT::Stack stackVars;

  ThreadData() {
    stackVars.reserve(RuntimeT::StackReserve);
  }
};

thread_local ThreadData threadData;

}  // namespace

AllocationTracker::AllocationTracker(const TypeDB& db, Recorder& recorder) : typeDB{db}, runtime_recorder{recorder} {
}

void AllocationTracker::onAlloc(const void* addr, int typeId, size_t count, const void* retAddr) {
  const auto status = doAlloc(addr, typeId, count, retAddr);
  if (status != AllocState::ADDR_SKIPPED) {
    runtime_recorder.incHeapAlloc(typeId, count);
  }
  LOG_TRACE("Alloc " << toString(addr, typeId, count, retAddr, true) << " " << 'H');
}

void AllocationTracker::onAllocStack(const void* addr, int typeId, size_t count, const void* retAddr) {
  const auto status = doAlloc(addr, typeId, count, retAddr);
  if (status != AllocState::ADDR_SKIPPED) {
    threadData.stackVars.push_back(addr);
    runtime_recorder.incStackAlloc(typeId, count);
  }
  LOG_TRACE("Alloc " << toString(addr, typeId, count, retAddr) << " " << 'S');
}

void AllocationTracker::onAllocGlobal(const void* addr, int typeId, size_t count, const void* retAddr) {
  const auto status = doAlloc(addr, typeId, count, retAddr);
  if (status != AllocState::ADDR_SKIPPED) {
    runtime_recorder.incGlobalAlloc(typeId, count);
  }
  LOG_TRACE("Alloc " << toString(addr, typeId, count, retAddr) << " " << 'G');
}

AllocState AllocationTracker::doAlloc(const void* addr, int typeId, size_t count, const void* retAddr) {
  AllocState status = AllocState::NO_INIT;
  if (unlikely(!typeDB.isValid(typeId))) {
    status |= AllocState::UNKNOWN_ID;
    LOG_ERROR("Allocation of unknown type " << toString(addr, typeId, count, retAddr));
  }

  // Calling malloc with size 0 may return a nullptr or some address that cannot be written to.
  // In the second case, the allocation is tracked anyway so that onFree() does not report an error.
  // On the other hand, an allocation at address 0x0 with size > 0 is an actual error.
  if (unlikely(count == 0)) {
    runtime_recorder.incZeroLengthAddr();
    status |= AllocState::ZERO_COUNT;
    LOG_WARNING("Zero-size allocation " << toString(addr, typeId, count, retAddr));
    if (addr == nullptr) {
      runtime_recorder.incZeroLengthAndNullAddr();
      LOG_ERROR("Zero-size and nullptr allocation " << toString(addr, typeId, count, retAddr));
      return status | AllocState::NULL_ZERO | AllocState::ADDR_SKIPPED;
    }
  } else if (unlikely(addr == nullptr)) {
    runtime_recorder.incNullAddr();
    LOG_ERROR("Nullptr allocation " << toString(addr, typeId, count, retAddr));
    return status | AllocState::NULL_PTR | AllocState::ADDR_SKIPPED;
  }

  const auto overridden = wrapper.put(addr, PointerInfo{typeId, count, retAddr});

  if (unlikely(overridden)) {
    runtime_recorder.incAddrReuse();
    status |= AllocState::ADDR_REUSE;
    LOG_WARNING("Pointer already in map " << toString(addr, typeId, count, retAddr));
    // LOG_WARNING("Overridden data in map " << toString(addr, def));
  }

  return status | AllocState::OK;
}

FreeState AllocationTracker::doFreeHeap(const void* addr, const void* retAddr) {
  if (unlikely(addr == nullptr)) {
    LOG_ERROR("Free on nullptr " << "(" << retAddr << ")");
    return FreeState::ADDR_SKIPPED | FreeState::NULL_PTR;
  }

  std::optional<PointerInfo> removed = wrapper.remove(addr);

  if (unlikely(!removed)) {
    LOG_ERROR("Free on unregistered address " << addr << " (" << retAddr << ")");
    return FreeState::ADDR_SKIPPED | FreeState::UNREG_ADDR;
  }

  LOG_TRACE("Free " << toString(addr, *removed));
  if constexpr (!std::is_same_v<Recorder, softcounter::NoneRecorder>) {
    runtime_recorder.incHeapFree(removed->typeId, removed->count);
  }
  return FreeState::OK;
}
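
// Note (illustration, not part of the original file): assuming Recorder is a
// build-time alias that selects the soft-counter implementation, the
// `if constexpr` in doFreeHeap compiles the counter update away entirely for
// the no-op recorder:
//
//   if constexpr (!std::is_same_v<Recorder, softcounter::NoneRecorder>) {
//     runtime_recorder.incHeapFree(removed->typeId, removed->count);
//   }  // no code emitted when Recorder == softcounter::NoneRecorder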

void AllocationTracker::onFreeHeap(const void* addr, const void* retAddr) {
  const auto status = doFreeHeap(addr, retAddr);
  if (FreeState::OK == status) {
    runtime_recorder.decHeapAlloc();
  }
}

void AllocationTracker::onLeaveScope(int alloca_count, const void* retAddr) {
  if (unlikely(alloca_count > static_cast<int>(threadData.stackVars.size()))) {
    LOG_ERROR("Stack is smaller than requested de-allocation count. alloca_count: " << alloca_count << ". size: "
                                                                                    << threadData.stackVars.size());
    alloca_count = threadData.stackVars.size();
  }

  const auto cend      = threadData.stackVars.cend();
  const auto start_pos = (cend - alloca_count);
  LOG_TRACE("Freeing stack (" << alloca_count << ")  " << std::distance(start_pos, threadData.stackVars.cend()))

  wrapper.remove_range(start_pos, cend, [&](std::optional<PointerInfo>& removed, MemAddr addr) {
    if (unlikely(!removed)) {
      LOG_ERROR("Free on unregistered address " << addr << " (" << retAddr << ")");
    } else {
      LOG_TRACE("Free " << toString(addr, *removed));
      if constexpr (!std::is_same_v<Recorder, softcounter::NoneRecorder>) {
        runtime_recorder.incStackFree(removed->typeId, removed->count);
      }
    }
  });

  threadData.stackVars.erase(start_pos, cend);
  runtime_recorder.decStackAlloc(alloca_count);
  LOG_TRACE("Stack after free: " << threadData.stackVars.size());
}
// Base address
std::optional<RuntimeT::MapEntry> AllocationTracker::findBaseAlloc(const void* addr) {
  return wrapper.find(addr);
}

}  // namespace typeart

void __typeart_alloc(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onAlloc(addr, typeId, count, retAddr);
}

void __typeart_alloc_stack(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onAllocStack(addr, typeId, count, retAddr);
}

void __typeart_alloc_global(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onAllocGlobal(addr, typeId, count, retAddr);
}

void __typeart_free(const void* addr) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onFreeHeap(addr, retAddr);
}

void __typeart_leave_scope(int alloca_count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onLeaveScope(alloca_count, retAddr);
}
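
// Sketch (not part of the original file): conceptually, the TypeART compiler
// pass instruments user code to report allocations through the hooks above.
// For a function with two stack variables, the instrumented form behaves
// roughly like:
//
//   void user_function() {
//     int    a[8];
//     double b;
//     __typeart_alloc_stack(&a, /*typeId=*/..., /*count=*/8);
//     __typeart_alloc_stack(&b, /*typeId=*/..., /*count=*/1);
//     // ... original function body ...
//     __typeart_leave_scope(2);  // pops the two most recent stack entries
//   }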

void __typeart_alloc_omp(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  auto& rt            = typeart::RuntimeSystem::get();
  rt.allocation_tracker().onAlloc(addr, typeId, count, retAddr);
  rt.recorder.incOmpContextHeap();
}

void __typeart_alloc_stack_omp(const void* addr, int typeId, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  auto& rt            = typeart::RuntimeSystem::get();
  rt.allocation_tracker().onAllocStack(addr, typeId, count, retAddr);
  rt.recorder.incOmpContextStack();
}

void __typeart_free_omp(const void* addr) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  auto& rt            = typeart::RuntimeSystem::get();
  rt.allocation_tracker().onFreeHeap(addr, retAddr);
  rt.recorder.incOmpContextFree();
}

void __typeart_leave_scope_omp(int alloca_count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  typeart::RuntimeSystem::get().allocation_tracker().onLeaveScope(alloca_count, retAddr);
}

void __typeart_alloc_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::global_types::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAlloc(addr, type_id, count, retAddr);
}
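
// Sketch (not part of the original file): the *_mty variants receive a pointer
// to compiler-emitted type metadata instead of a plain type id; this file only
// reads its type_id field, roughly:
//
//   namespace typeart::global_types {
//   struct GlobalTypeInfo {
//     int type_id;
//     // ... further fields, consumed by the type translator (assumed) ...
//   };
//   }  // namespace typeart::global_types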

void __typeart_alloc_stack_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::global_types::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAllocStack(addr, type_id, count, retAddr);
}

void __typeart_alloc_global_mty(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::global_types::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAllocGlobal(addr, type_id, count, retAddr);
}

// New in Pull Request #182; not yet covered by any test (0 hits). Note that,
// unlike __typeart_alloc_global_mty above, this variant forwards to onAlloc
// rather than onAllocGlobal.
void __typeart_alloc_global_mty_omp(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::global_types::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAlloc(addr, type_id, count, retAddr);
}

// New in Pull Request #182; not yet covered by any test (0 hits).
void __typeart_alloc_stack_mty_omp(const void* addr, const void* info, size_t count) {
  TYPEART_RUNTIME_GUARD;
  const void* retAddr = __builtin_return_address(0);
  const auto type_id  = reinterpret_cast<const typeart::global_types::GlobalTypeInfo*>(info)->type_id;
  auto& rt            = typeart::RuntimeSystem::get();
  assert(type_id == rt.type_translator().get_type_id_for(info) && "Type ID of global and lookup must match");
  rt.allocation_tracker().onAllocStack(addr, type_id, count, retAddr);
}