• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bdwgc / bdwgc / 2143

16 May 2026 08:07PM UTC coverage: 80.544% (-0.009%) from 80.553%
2143

push

travis-ci

ivmai
Move free lists initialization code out of GC_init_thread_local
(refactoring)

* thread_local_alloc.c (init_freelists): New `static` function (move
code iterating over `kind` and `j` from `GC_init_thread_local()`).
* thread_local_alloc.c (GC_init_thread_local): Call `init_freelists()`
before the `GC_setspecific()` call.

8 of 8 new or added lines in 1 file covered. (100.0%)

127 existing lines in 5 files now uncovered.

7224 of 8969 relevant lines covered (80.54%)

19852281.44 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

90.22
/thread_local_alloc.c
1
/*
2
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
3
 * Copyright (c) 2008-2025 Ivan Maidanski
4
 *
5
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
7
 *
8
 * Permission is hereby granted to use or copy this program
9
 * for any purpose, provided the above notices are retained on all copies.
10
 * Permission to modify the code and to distribute modified code is granted,
11
 * provided the above notices are retained, and a notice that the code was
12
 * modified is included with the above copyright notice.
13
 */
14

15
#include "private/gc_priv.h"
16

17
#if defined(THREAD_LOCAL_ALLOC)
18

19
#  if !defined(THREADS) && !defined(CPPCHECK)
20
#    error Invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS
21
#  endif
22

23
#  include "private/thread_local_alloc.h"
24

25
#  if defined(USE_COMPILER_TLS)
26
__thread GC_ATTR_TLS_FAST
27
#  elif defined(USE_WIN32_COMPILER_TLS)
28
__declspec(thread) GC_ATTR_TLS_FAST
29
#  endif
30
    GC_key_t GC_thread_key;
31

32
#  if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
33
static GC_bool keys_initialized;
34
#  endif
35

36
#  ifndef GC_NO_DEINIT
/*
 * Forget that the thread-local allocation key has been created, so that
 * a later `GC_init_thread_local()` call will recreate it.
 */
GC_INNER void
GC_reset_thread_local_initialization(void)
{
#    if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
  keys_initialized = FALSE;
#    endif
  /* TODO: Dispose resources associated with `GC_thread_key`. */
}
#  endif
46

47
/* Initialize all the free lists of a thread-local storage structure. */
48
static void
49
init_freelists(GC_tlfs p)
88,964✔
50
{
51
  int kind, j;
52

53
  for (j = 0; j < GC_TINY_FREELISTS; ++j) {
2,313,064✔
54
    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
8,896,400✔
55
      p->_freelists[kind][j] = NUMERIC_TO_VPTR(1);
6,672,300✔
56
    }
57
#  ifdef GC_GCJ_SUPPORT
58
    p->gcj_freelists[j] = NUMERIC_TO_VPTR(1);
2,224,100✔
59
#  endif
60
  }
61
  /*
62
   * The zero-sized free list is handled like the regular free list, to
63
   * ensure that the explicit deallocation works.  However, an allocation
64
   * of a `gcj` object with the zero size is always an error.
65
   */
66
#  ifdef GC_GCJ_SUPPORT
67
  p->gcj_freelists[0] = MAKE_CPTR(ERROR_FL);
88,964✔
68
#  endif
69
}
88,964✔
70

71
/*
72
 * Return a single nonempty free list `fl` to the global one pointed to
73
 * by `gfl`.
74
 */
75
static void
76
return_single_freelist(void *fl, void **gfl)
1,670✔
77
{
78
  if (NULL == *gfl) {
1,670✔
79
    *gfl = fl;
1,495✔
80
  } else {
81
    void *q = fl;
175✔
82
    void **q_ptr;
83

84
    GC_ASSERT(GC_size(fl) == GC_size(*gfl));
175✔
85
    /* Concatenate. */
86
    do {
87
      q_ptr = &obj_link(q);
7,629✔
88
      q = *q_ptr;
7,629✔
89
    } while (ADDR(q) >= HBLKSIZE);
7,629✔
90
    GC_ASSERT(NULL == q);
175✔
91
    *q_ptr = *gfl;
175✔
92
    *gfl = fl;
175✔
93
  }
94
}
1,670✔
95

96
/*
97
 * Recover the contents of the free-list array `fl` into the global one
98
 * `gfl`.
99
 */
100
static void
101
return_freelists(void **fl, void **gfl)
354,724✔
102
{
103
  int i;
104

105
  for (i = 1; i < GC_TINY_FREELISTS; ++i) {
8,868,100✔
106
    if (ADDR(fl[i]) >= HBLKSIZE) {
8,513,375✔
107
      return_single_freelist(fl[i], &gfl[i]);
1,670✔
108
    }
109
    /*
110
     * Clear `fl[i]`, since the thread structure may hang around.
111
     * Do it in a way that is likely to trap if we access it.
112
     */
113
    fl[i] = (ptr_t)NUMERIC_TO_VPTR(HBLKSIZE);
8,513,376✔
114
  }
115
  /* The 0 granule free list really contains 1 granule objects. */
116
  if (ADDR(fl[0]) >= HBLKSIZE
354,725✔
117
#  ifdef GC_GCJ_SUPPORT
118
      && ADDR(fl[0]) != ERROR_FL
88,681✔
119
#  endif
120
  ) {
UNCOV
121
    return_single_freelist(fl[0], &gfl[1]);
×
122
  }
123
}
354,725✔
124

125
#  ifdef USE_PTHREAD_SPECIFIC
/*
 * Re-set the TLS value on thread cleanup to allow thread-local allocations
 * to happen in the TLS destructors.  `GC_unregister_my_thread()` (and
 * similar routines) will finally set the `GC_thread_key` to `NULL`
 * preventing this destructor from being called repeatedly.
 */
static void
reset_thread_key(void *v)
{
  pthread_setspecific(GC_thread_key, v);
}
#  else
/* No destructor is needed; pass a null destructor to `GC_key_create()`. */
#    define reset_thread_key 0
#  endif
140

141
/*
 * Set up thread-local allocation for the calling thread: create the
 * TLS key on first use, initialize the free lists of `p` and publish
 * `p` as this thread's allocation structure.
 */
GC_INNER void
GC_init_thread_local(GC_tlfs p)
{
#  if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(!keys_initialized)) {
    /* First thread ever: create the process-wide TLS key once. */
#    ifdef USE_CUSTOM_SPECIFIC
    /* Ensure proper alignment of a "pushed" GC symbol. */
    ASSERT_ALIGNMENT(&GC_thread_key);
#    endif
    if (GC_key_create(&GC_thread_key, reset_thread_key) != 0)
      ABORT("Failed to create key for local allocator");
    keys_initialized = TRUE;
  }
#  endif
  /*
   * Initialize the free lists before `GC_setspecific()` publishes `p`,
   * so the structure is never visible in a partially set-up state.
   */
  init_freelists(p);
#  if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
  if (GC_setspecific(GC_thread_key, p) != 0)
    ABORT("Failed to set thread specific allocation pointers");
#  else
  GC_thread_key = p;
#  endif
}
164

165
GC_INNER void
166
GC_destroy_thread_local(GC_tlfs p)
88,681✔
167
{
168
  int kind;
169

170
  GC_ASSERT(I_HOLD_LOCK());
88,681✔
171
  GC_ASSERT(GC_getspecific(GC_thread_key) == p);
88,681✔
172
  /* We currently only do this from the thread itself. */
173
  GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
174
  for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
354,724✔
175
    if (kind == (int)GC_n_kinds) {
266,043✔
176
      /* The kind is not created. */
UNCOV
177
      break;
×
178
    }
179
    return_freelists(p->_freelists[kind], GC_obj_kinds[kind].ok_freelist);
266,043✔
180
  }
181
#  ifdef GC_GCJ_SUPPORT
182
  return_freelists(p->gcj_freelists, (void **)GC_gcjobjfreelist);
88,681✔
183
#  endif
184
}
88,681✔
185

186
STATIC void *
187
GC_get_tlfs(void)
200,015,930✔
188
{
189
#  if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
190
  if (UNLIKELY(!keys_initialized))
191
    return NULL;
192

193
  return GC_getspecific(GC_thread_key);
194
#  else
195
  GC_key_t k = GC_thread_key;
200,015,930✔
196

197
  if (UNLIKELY(0 == k)) {
200,015,930✔
198
    /*
199
     * We have not yet run `GC_init_parallel()`.  That means we also
200
     * are not locking, so `GC_malloc_kind_global()` is fairly cheap.
201
     */
UNCOV
202
    return NULL;
×
203
  }
204
  return GC_getspecific(k);
200,015,930✔
205
#  endif
206
}
207

208
/*
 * Allocate an object of `lb` bytes of the given `kind`, preferring the
 * calling thread's local free lists and falling back to
 * `GC_malloc_kind_global()` when they are unavailable.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  size_t lg;
  void *tsd;
  void *result;

#  if MAXOBJKINDS > THREAD_FREELISTS_KINDS
  /* Kinds without per-thread free lists go straight to the global path. */
  if (UNLIKELY(kind >= THREAD_FREELISTS_KINDS))
    return GC_malloc_kind_global(lb, kind);
#  endif
  tsd = GC_get_tlfs();
  if (UNLIKELY(NULL == tsd))
    /* No thread-local structure yet; use the global allocator. */
    return GC_malloc_kind_global(lb, kind);

  GC_ASSERT(GC_is_initialized);
  GC_ASSERT(GC_is_thread_tsd_valid(tsd));
  lg = ALLOC_REQUEST_GRANS(lb);
  /*
   * Fast path from the thread-local free list of `kind`; the global
   * allocation expression is presumably evaluated by the macro only on
   * the slow path.  The final argument clears the free-list link word
   * of the new object unless the kind is pointer-free.
   */
  GC_FAST_MALLOC_GRANS(
      result, lg, ((GC_tlfs)tsd)->_freelists[kind], DIRECT_GRANULES, kind,
      GC_malloc_kind_global(lb, kind),
      (void)(kind == PTRFREE ? NULL : (obj_link(result) = NULL)));
#  ifdef LOG_ALLOCS
  GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                (unsigned long)lb, kind, result, (unsigned long)GC_gc_no);
#  endif
  return result;
}
236

237
#  ifdef GC_GCJ_SUPPORT
238

239
#    include "gc/gc_gcj.h"
240

241
/*
 * Thread-local variant of the `gcj`-style allocation: allocate `lb`
 * bytes and store `vtable_ptr` in the first word of the new object.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_gcj_malloc(size_t lb, const void *vtable_ptr)
{
  void *result;
  void **tiny_fl;
  size_t lg;

  /*
   * Unlike the other thread-local allocation calls, we assume that the
   * collector has been explicitly initialized.
   */
  GC_ASSERT(GC_gcjobjfreelist != NULL);
#    if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
  GC_ASSERT(keys_initialized);
#    else
  GC_ASSERT(GC_thread_key != 0);
#    endif

  /*
   * `gcj`-style allocation without locks is extremely tricky.
   * The fundamental issue is that we may end up marking a free list,
   * which has free-list links instead of "vtable" pointers.
   * That is usually OK, since the next object on the free list will be
   * cleared, and will thus be interpreted as containing a zero descriptor.
   * That is fine if the object has not yet been initialized.  But there
   * are interesting potential races.  In the case of incremental
   * collection, this seems hopeless, since the marker may run
   * asynchronously, and may pick up the pointer to the next free-list
   * entry (which it thinks is a "vtable" pointer), get suspended for
   * a while, and then see an allocated object instead of the "vtable".
   * This may be avoidable with either a handshake with the collector or,
   * probably more easily, by moving the free list links to the second
   * "pointer-sized" word of each object.  The latter is not a universal
   * win, since on architectures like Itanium, nonzero offsets are not
   * necessarily free.  And there may be cache fill order issues.
   * For now, we punt with the incremental collection.  This probably means
   * that the incremental collection should be enabled before we create
   * a second thread.
   */
  if (UNLIKELY(GC_incremental))
    return GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */);

  tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists;
  lg = ALLOC_REQUEST_GRANS(lb);

  /*
   * The provided `default_expr` below forces the initialization of the
   * "vtable" pointer.  This is necessary to ensure some very subtle
   * properties required if a garbage collection is run in the middle of
   * such an allocation.  Here we implicitly also assume atomicity for the
   * free list and method pointer assignments.  We must update the free list
   * before we store the pointer.  Otherwise a collection at this point
   * would see a corrupted free list.  A real memory barrier is not needed,
   * since the action of stopping this thread will cause prior writes
   * to complete.  We assert that any concurrent marker will stop us.
   * Thus it is impossible for a mark procedure to see the allocation of the
   * next object, but to see this object still containing a free-list pointer.
   * Otherwise the marker, by misinterpreting the free-list link as a "vtable"
   * pointer, might find a random "mark descriptor" in the next object.
   */
  GC_FAST_MALLOC_GRANS(
      result, lg, tiny_fl, DIRECT_GRANULES, GC_gcj_kind,
      GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */), do {
        AO_compiler_barrier();
        *(const void **)result = vtable_ptr;
      } while (0));
  return result;
}
309

310
#  endif /* GC_GCJ_SUPPORT */
311

312
GC_INNER void
313
GC_mark_thread_local_fls_for(GC_tlfs p)
251,795✔
314
{
315
  int j;
316

317
  for (j = 0; j < GC_TINY_FREELISTS; ++j) {
6,546,670✔
318
    int kind;
319

320
    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
25,179,500✔
321
      /*
322
       * Load the pointer atomically as it might be updated concurrently
323
       * by `GC_FAST_MALLOC_GRANS()`.
324
       */
325
      ptr_t q = GC_cptr_load((volatile ptr_t *)&p->_freelists[kind][j]);
18,884,625✔
326

327
      if (ADDR(q) > HBLKSIZE)
18,884,625✔
328
        GC_set_fl_marks(q);
1,619,532✔
329
    }
330
#  ifdef GC_GCJ_SUPPORT
331
    if (LIKELY(j > 0)) {
6,294,875✔
332
      ptr_t q = GC_cptr_load((volatile ptr_t *)&p->gcj_freelists[j]);
6,043,080✔
333

334
      if (ADDR(q) > HBLKSIZE)
6,043,080✔
335
        GC_set_fl_marks(q);
4,985✔
336
    }
337
#  endif
338
  }
339
}
251,795✔
340

341
#  if defined(GC_ASSERTIONS)
/* Check that all thread-local free-lists in `p` are completely marked. */
void
GC_check_tls_for(GC_tlfs p)
{
  int gran;

  for (gran = 1; gran < GC_TINY_FREELISTS; ++gran) {
    int k;

    for (k = 0; k < THREAD_FREELISTS_KINDS; ++k)
      GC_check_fl_marks(&p->_freelists[k][gran]);
#    ifdef GC_GCJ_SUPPORT
    GC_check_fl_marks(&p->gcj_freelists[gran]);
#    endif
  }
}
#  endif
358

359
#endif /* THREAD_LOCAL_ALLOC */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc