• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bdwgc / bdwgc / 2137

07 May 2026 03:10PM UTC coverage: 80.39% (+0.1%) from 80.268%
2137

push

travis-ci

ivmai
Adjust calling conventions for callbacks in msvc_dbg.c
(refactoring)

* extra/msvc_dbg.c (FunctionTableAccess, GetModuleBase): Change
calling convention from `CALLBACK` to `WINAPI`.

7207 of 8965 relevant lines covered (80.39%)

18162301.35 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.62
/thread_local_alloc.c
1
/*
2
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
3
 * Copyright (c) 2008-2025 Ivan Maidanski
4
 *
5
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
7
 *
8
 * Permission is hereby granted to use or copy this program
9
 * for any purpose, provided the above notices are retained on all copies.
10
 * Permission to modify the code and to distribute modified code is granted,
11
 * provided the above notices are retained, and a notice that the code was
12
 * modified is included with the above copyright notice.
13
 */
14

15
#include "private/gc_priv.h"
16

17
#if defined(THREAD_LOCAL_ALLOC)
18

19
#  if !defined(THREADS) && !defined(CPPCHECK)
20
#    error Invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS
21
#  endif
22

23
#  include "private/thread_local_alloc.h"
24

25
#  if defined(USE_COMPILER_TLS)
26
__thread GC_ATTR_TLS_FAST
27
#  elif defined(USE_WIN32_COMPILER_TLS)
28
__declspec(thread) GC_ATTR_TLS_FAST
29
#  endif
30
    GC_key_t GC_thread_key;
31

32
static GC_bool keys_initialized;
33

34
#  ifndef GC_NO_DEINIT
/*
 * Forget that the thread-local allocation key has been created, so that
 * a later `GC_init_thread_local()` call will recreate it.  Presumably
 * invoked on collector deinitialization (the `GC_NO_DEINIT` guard) --
 * confirm against the callers.
 */
GC_INNER void
GC_reset_thread_local_initialization(void)
{
  keys_initialized = FALSE;
  /* TODO: Dispose resources associated with `GC_thread_key`. */
}
#  endif
42

43
/*
44
 * Return a single nonempty free list `fl` to the global one pointed to
45
 * by `gfl`.
46
 */
47
static void
48
return_single_freelist(void *fl, void **gfl)
1,651✔
49
{
50
  if (NULL == *gfl) {
1,651✔
51
    *gfl = fl;
1,503✔
52
  } else {
53
    void *q = fl;
148✔
54
    void **q_ptr;
55

56
    GC_ASSERT(GC_size(fl) == GC_size(*gfl));
148✔
57
    /* Concatenate. */
58
    do {
59
      q_ptr = &obj_link(q);
6,665✔
60
      q = *q_ptr;
6,665✔
61
    } while (ADDR(q) >= HBLKSIZE);
6,665✔
62
    GC_ASSERT(NULL == q);
148✔
63
    *q_ptr = *gfl;
148✔
64
    *gfl = fl;
148✔
65
  }
66
}
1,651✔
67

68
/*
69
 * Recover the contents of the free-list array `fl` into the global one
70
 * `gfl`.
71
 */
72
static void
73
return_freelists(void **fl, void **gfl)
410,472✔
74
{
75
  int i;
76

77
  for (i = 1; i < GC_TINY_FREELISTS; ++i) {
10,261,800✔
78
    if (ADDR(fl[i]) >= HBLKSIZE) {
9,851,328✔
79
      return_single_freelist(fl[i], &gfl[i]);
1,651✔
80
    }
81
    /*
82
     * Clear `fl[i]`, since the thread structure may hang around.
83
     * Do it in a way that is likely to trap if we access it.
84
     */
85
    fl[i] = (ptr_t)NUMERIC_TO_VPTR(HBLKSIZE);
9,851,328✔
86
  }
87
  /* The 0 granule free list really contains 1 granule objects. */
88
  if (ADDR(fl[0]) >= HBLKSIZE
410,472✔
89
#  ifdef GC_GCJ_SUPPORT
90
      && ADDR(fl[0]) != ERROR_FL
102,618✔
91
#  endif
92
  ) {
93
    return_single_freelist(fl[0], &gfl[1]);
×
94
  }
95
}
410,472✔
96

97
#  ifdef USE_PTHREAD_SPECIFIC
/*
 * Re-set the TLS value on thread cleanup to allow thread-local allocations
 * to happen in the TLS destructors.  `GC_unregister_my_thread()` (and
 * similar routines) will finally set the `GC_thread_key` to `NULL`
 * preventing this destructor from being called repeatedly.
 */
static void
reset_thread_key(void *v)
{
  pthread_setspecific(GC_thread_key, v);
}
#  else
/* No destructor needed; `0` is passed as such to `GC_key_create()`. */
#    define reset_thread_key 0
#  endif
112

113
/*
 * Set up the thread-local free lists in `p` for the calling thread and
 * register `p` as the value of `GC_thread_key`.  On the first call this
 * also creates the key itself.  The caller must hold the allocator lock.
 */
GC_INNER void
GC_init_thread_local(GC_tlfs p)
{
  int kind, j, res;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(!keys_initialized)) {
#  ifdef USE_CUSTOM_SPECIFIC
    /* Ensure proper alignment of a "pushed" GC symbol. */
    ASSERT_ALIGNMENT(&GC_thread_key);
#  endif
    /* `reset_thread_key` is a real destructor only for pthread TLS. */
    res = GC_key_create(&GC_thread_key, reset_thread_key);
    if (COVERT_DATAFLOW(res) != 0) {
      ABORT("Failed to create key for local allocator");
    }
    keys_initialized = TRUE;
  }
  res = GC_setspecific(GC_thread_key, p);
  if (COVERT_DATAFLOW(res) != 0) {
    ABORT("Failed to set thread specific allocation pointers");
  }
  /*
   * Seed every free-list slot with the small non-pointer value 1.
   * NOTE(review): presumably a sentinel the allocation fast path
   * (`GC_FAST_MALLOC_GRANS()`) recognizes as "list not populated yet" --
   * confirm against the macro definition.
   */
  for (j = 0; j < GC_TINY_FREELISTS; ++j) {
    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
      p->_freelists[kind][j] = NUMERIC_TO_VPTR(1);
    }
#  ifdef GC_GCJ_SUPPORT
    p->gcj_freelists[j] = NUMERIC_TO_VPTR(1);
#  endif
  }
  /*
   * The zero-sized free list is handled like the regular free list, to
   * ensure that the explicit deallocation works.  However, an allocation
   * of a `gcj` object with the zero size is always an error.
   */
#  ifdef GC_GCJ_SUPPORT
  p->gcj_freelists[0] = MAKE_CPTR(ERROR_FL);
#  endif
}
151

152
GC_INNER void
153
GC_destroy_thread_local(GC_tlfs p)
102,618✔
154
{
155
  int kind;
156

157
  GC_ASSERT(I_HOLD_LOCK());
102,618✔
158
  GC_ASSERT(GC_getspecific(GC_thread_key) == p);
102,618✔
159
  /* We currently only do this from the thread itself. */
160
  GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
161
  for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
410,471✔
162
    if (kind == (int)GC_n_kinds) {
307,854✔
163
      /* The kind is not created. */
164
      break;
×
165
    }
166
    return_freelists(p->_freelists[kind], GC_obj_kinds[kind].ok_freelist);
307,854✔
167
  }
168
#  ifdef GC_GCJ_SUPPORT
169
  return_freelists(p->gcj_freelists, (void **)GC_gcjobjfreelist);
102,617✔
170
#  endif
171
}
102,617✔
172

173
/*
 * Return the calling thread's thread-local free-list structure (the value
 * stored under `GC_thread_key`), or `NULL` if thread-local allocation has
 * not been set up yet.
 */
STATIC void *
GC_get_tlfs(void)
{
#  if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
  /* Read the key once; zero means the key has not been created yet. */
  GC_key_t k = GC_thread_key;

  if (UNLIKELY(0 == k)) {
    /*
     * We have not yet run `GC_init_parallel()`.  That means we also
     * are not locking, so `GC_malloc_kind_global()` is fairly cheap.
     */
    return NULL;
  }
  return GC_getspecific(k);
#  else
  /* Here the key value itself cannot serve as the "created" flag. */
  if (UNLIKELY(!keys_initialized))
    return NULL;

  return GC_getspecific(GC_thread_key);
#  endif
}
194

195
/*
 * Allocate `lb` bytes of an object of the given `kind`, preferring the
 * calling thread's local free lists and falling back to
 * `GC_malloc_kind_global()` when thread-local allocation does not apply
 * (the kind has no per-thread lists, or TLS is not yet set up).
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  size_t lg;
  void *tsd;
  void *result;

#  if MAXOBJKINDS > THREAD_FREELISTS_KINDS
  /* Kinds without per-thread free lists go straight to the global path. */
  if (UNLIKELY(kind >= THREAD_FREELISTS_KINDS))
    return GC_malloc_kind_global(lb, kind);
#  endif
  tsd = GC_get_tlfs();
  if (UNLIKELY(NULL == tsd))
    return GC_malloc_kind_global(lb, kind);

  GC_ASSERT(GC_is_initialized);
  GC_ASSERT(GC_is_thread_tsd_valid(tsd));
  lg = ALLOC_REQUEST_GRANS(lb);
  /*
   * Thread-local fast path.  The `GC_malloc_kind_global()` argument
   * presumably serves as the slow-path expression, and the last argument
   * clears the free-list link of the result for pointer-containing kinds
   * -- see the macro definition for the exact contract.
   */
  GC_FAST_MALLOC_GRANS(
      result, lg, ((GC_tlfs)tsd)->_freelists[kind], DIRECT_GRANULES, kind,
      GC_malloc_kind_global(lb, kind),
      (void)(kind == PTRFREE ? NULL : (obj_link(result) = NULL)));
#  ifdef LOG_ALLOCS
  GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                (unsigned long)lb, kind, result, (unsigned long)GC_gc_no);
#  endif
  return result;
}
223

224
#  ifdef GC_GCJ_SUPPORT
225

226
#    include "gc/gc_gcj.h"
227

228
GC_API GC_ATTR_MALLOC void *GC_CALL
229
GC_gcj_malloc(size_t lb, const void *vtable_ptr)
680,043✔
230
{
231
  void *result;
232
  void **tiny_fl;
233
  size_t lg;
234

235
  /*
236
   * Unlike the other thread-local allocation calls, we assume that the
237
   * collector has been explicitly initialized.
238
   */
239
  GC_ASSERT(GC_gcjobjfreelist != NULL);
680,043✔
240
#    if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
241
  GC_ASSERT(keys_initialized);
242
#    else
243
  GC_ASSERT(GC_thread_key != 0);
680,043✔
244
#    endif
245

246
  /*
247
   * `gcj`-style allocation without locks is extremely tricky.
248
   * The fundamental issue is that we may end up marking a free list,
249
   * which has free-list links instead of "vtable" pointers.
250
   * That is usually OK, since the next object on the free list will be
251
   * cleared, and will thus be interpreted as containing a zero descriptor.
252
   * That is fine if the object has not yet been initialized.  But there
253
   * are interesting potential races.  In the case of incremental
254
   * collection, this seems hopeless, since the marker may run
255
   * asynchronously, and may pick up the pointer to the next free-list
256
   * entry (which it thinks is a "vtable" pointer), get suspended for
257
   * a while, and then see an allocated object instead of the "vtable".
258
   * This may be avoidable with either a handshake with the collector or,
259
   * probably more easily, by moving the free list links to the second
260
   * "pointer-sized" word of each object.  The latter is not a universal
261
   * win, since on architecture like Itanium, nonzero offsets are not
262
   * necessarily free.  And there may be cache fill order issues.
263
   * For now, we punt with the incremental collection.  This probably means
264
   * that the incremental collection should be enabled before we create
265
   * a second thread.
266
   */
267
  if (UNLIKELY(GC_incremental))
680,043✔
268
    return GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */);
680,043✔
269

270
  tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists;
×
271
  lg = ALLOC_REQUEST_GRANS(lb);
×
272

273
  /*
274
   * The provided `default_expr` below forces the initialization of the
275
   * "vtable" pointer.  This is necessary to ensure some very subtle
276
   * properties required if a garbage collection is run in the middle of
277
   * such an allocation.  Here we implicitly also assume atomicity for the
278
   * free list and method pointer assignments.  We must update the free list
279
   * before we store the pointer.  Otherwise a collection at this point
280
   * would see a corrupted free list.  A real memory barrier is not needed,
281
   * since the action of stopping this thread will cause prior writes
282
   * to complete.  We assert that any concurrent marker will stop us.
283
   * Thus it is impossible for a mark procedure to see the allocation of the
284
   * next object, but to see this object still containing a free-list pointer.
285
   * Otherwise the marker, by misinterpreting the free-list link as a "vtable"
286
   * pointer, might find a random "mark descriptor" in the next object.
287
   */
288
  GC_FAST_MALLOC_GRANS(
×
289
      result, lg, tiny_fl, DIRECT_GRANULES, GC_gcj_kind,
290
      GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */), do {
291
        AO_compiler_barrier();
292
        *(const void **)result = vtable_ptr;
293
      } while (0));
294
  return result;
×
295
}
296

297
#  endif /* GC_GCJ_SUPPORT */
298

299
/*
 * Set mark bits on all objects currently held in `p`'s thread-local free
 * lists, so they are not reclaimed by the collection in progress.  Small
 * slot values (not above `HBLKSIZE`) are sentinels, not list pointers,
 * and are skipped.
 */
GC_INNER void
GC_mark_thread_local_fls_for(GC_tlfs p)
{
  int j;

  for (j = 0; j < GC_TINY_FREELISTS; ++j) {
    int kind;

    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
      /*
       * Load the pointer atomically as it might be updated concurrently
       * by `GC_FAST_MALLOC_GRANS()`.
       */
      ptr_t q = GC_cptr_load((volatile ptr_t *)&p->_freelists[kind][j]);

      if (ADDR(q) > HBLKSIZE)
        GC_set_fl_marks(q);
    }
#  ifdef GC_GCJ_SUPPORT
    /*
     * Skip `gcj_freelists[0]`: it holds the `ERROR_FL` sentinel (see
     * `GC_init_thread_local()`), never a real list.
     */
    if (LIKELY(j > 0)) {
      ptr_t q = GC_cptr_load((volatile ptr_t *)&p->gcj_freelists[j]);

      if (ADDR(q) > HBLKSIZE)
        GC_set_fl_marks(q);
    }
#  endif
  }
}
327

328
#  if defined(GC_ASSERTIONS)
/* Check that all thread-local free lists in `p` are completely marked. */
void
GC_check_tls_for(GC_tlfs p)
{
  int i, k;

  /* Slot 0 is skipped: it does not hold an ordinary free list. */
  for (i = 1; i < GC_TINY_FREELISTS; ++i) {
    for (k = 0; k < THREAD_FREELISTS_KINDS; ++k) {
      GC_check_fl_marks(&p->_freelists[k][i]);
    }
#    ifdef GC_GCJ_SUPPORT
    GC_check_fl_marks(&p->gcj_freelists[i]);
#    endif
  }
}
#  endif
345

346
#endif /* THREAD_LOCAL_ALLOC */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc