• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bdwgc / bdwgc / 2055

25 Feb 2026 06:07PM UTC coverage: 77.66% (+0.4%) from 77.233%
2055

push

travis-ci

ivmai
Refine CORD description that empty C string is not a valid cord
(documentation)

* include/gc/cord.h (CORD): Add comment about empty C string.

6911 of 8899 relevant lines covered (77.66%)

17592593.07 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.62
/thread_local_alloc.c
1
/*
2
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
3
 * Copyright (c) 2008-2025 Ivan Maidanski
4
 *
5
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
7
 *
8
 * Permission is hereby granted to use or copy this program
9
 * for any purpose, provided the above notices are retained on all copies.
10
 * Permission to modify the code and to distribute modified code is granted,
11
 * provided the above notices are retained, and a notice that the code was
12
 * modified is included with the above copyright notice.
13
 */
14

15
#include "private/gc_priv.h"
16

17
#if defined(THREAD_LOCAL_ALLOC)
18

19
#  if !defined(THREADS) && !defined(CPPCHECK)
20
#    error Invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS
21
#  endif
22

23
#  include "private/thread_local_alloc.h"
24

/*
 * The key providing access to the calling thread's free lists
 * (a `GC_tlfs` value, see `GC_init_thread_local()`).  Kept in
 * compiler-supported fast TLS where available.
 */
#  if defined(USE_COMPILER_TLS)
__thread GC_ATTR_TLS_FAST
#  elif defined(USE_WIN32_COMPILER_TLS)
__declspec(thread) GC_ATTR_TLS_FAST
#  endif
    GC_key_t GC_thread_key;

/* Set to `TRUE` once `GC_thread_key` has been created successfully. */
static GC_bool keys_initialized;

#  ifndef GC_NO_DEINIT
/*
 * Restore the thread-local allocation machinery to its pristine
 * (uninitialized) state, so that a later `GC_init_thread_local()` call
 * re-creates `GC_thread_key`.
 */
GC_INNER void
GC_reset_thread_local_initialization(void)
{
  keys_initialized = FALSE;
  /* TODO: Dispose resources associated with `GC_thread_key`. */
}
#  endif
42

43
/*
44
 * Return a single nonempty free list `fl` to the global one pointed to
45
 * by `gfl`.
46
 */
47
static void
48
return_single_freelist(void *fl, void **gfl)
1,724✔
49
{
50
  if (NULL == *gfl) {
1,724✔
51
    *gfl = fl;
1,527✔
52
  } else {
53
    void *q = fl;
197✔
54
    void **q_ptr;
55

56
    GC_ASSERT(GC_size(fl) == GC_size(*gfl));
197✔
57
    /* Concatenate. */
58
    do {
59
      q_ptr = &obj_link(q);
8,039✔
60
      q = *q_ptr;
8,039✔
61
    } while (ADDR(q) >= HBLKSIZE);
8,039✔
62
    GC_ASSERT(NULL == q);
197✔
63
    *q_ptr = *gfl;
197✔
64
    *gfl = fl;
197✔
65
  }
66
}
1,724✔
67

68
/*
69
 * Recover the contents of the free-list array `fl` into the global one
70
 * `gfl`.
71
 */
72
static void
73
return_freelists(void **fl, void **gfl)
368,488✔
74
{
75
  int i;
76

77
  for (i = 1; i < GC_TINY_FREELISTS; ++i) {
9,212,200✔
78
    if (ADDR(fl[i]) >= HBLKSIZE) {
8,843,712✔
79
      return_single_freelist(fl[i], &gfl[i]);
1,724✔
80
    }
81
    /*
82
     * Clear `fl[i]`, since the thread structure may hang around.
83
     * Do it in a way that is likely to trap if we access it.
84
     */
85
    fl[i] = (ptr_t)NUMERIC_TO_VPTR(HBLKSIZE);
8,843,712✔
86
  }
87
  /* The 0 granule free list really contains 1 granule objects. */
88
  if (ADDR(fl[0]) >= HBLKSIZE
368,488✔
89
#  ifdef GC_GCJ_SUPPORT
90
      && ADDR(fl[0]) != ERROR_FL
92,122✔
91
#  endif
92
  ) {
93
    return_single_freelist(fl[0], &gfl[1]);
×
94
  }
95
}
368,488✔
96

#  ifdef USE_PTHREAD_SPECIFIC
/*
 * Re-set the TLS value on thread cleanup to allow thread-local allocations
 * to happen in the TLS destructors.  `GC_unregister_my_thread()` (and
 * similar routines) will finally set the `GC_thread_key` to `NULL`
 * preventing this destructor from being called repeatedly.
 */
static void
reset_thread_key(void *v)
{
  pthread_setspecific(GC_thread_key, v);
}
#  else
/* No destructor is needed; pass a null pointer to `GC_key_create()`. */
#    define reset_thread_key 0
#  endif
112

113
GC_INNER void
114
GC_init_thread_local(GC_tlfs p)
92,402✔
115
{
116
  int kind, j, res;
117

118
  GC_ASSERT(I_HOLD_LOCK());
92,402✔
119
  if (UNLIKELY(!keys_initialized)) {
92,402✔
120
#  ifdef USE_CUSTOM_SPECIFIC
121
    /* Ensure proper alignment of a "pushed" GC symbol. */
122
    GC_ASSERT(ADDR(&GC_thread_key) % ALIGNMENT == 0);
123
#  endif
124
    res = GC_key_create(&GC_thread_key, reset_thread_key);
35✔
125
    if (COVERT_DATAFLOW(res) != 0) {
35✔
126
      ABORT("Failed to create key for local allocator");
×
127
    }
128
    keys_initialized = TRUE;
35✔
129
  }
130
  res = GC_setspecific(GC_thread_key, p);
92,402✔
131
  if (COVERT_DATAFLOW(res) != 0) {
92,402✔
132
    ABORT("Failed to set thread specific allocation pointers");
×
133
  }
134
  for (j = 0; j < GC_TINY_FREELISTS; ++j) {
2,402,452✔
135
    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
9,240,200✔
136
      p->_freelists[kind][j] = NUMERIC_TO_VPTR(1);
6,930,150✔
137
    }
138
#  ifdef GC_GCJ_SUPPORT
139
    p->gcj_freelists[j] = NUMERIC_TO_VPTR(1);
2,310,050✔
140
#  endif
141
  }
142
  /*
143
   * The zero-sized free list is handled like the regular free list, to
144
   * ensure that the explicit deallocation works.  However, an allocation
145
   * of a `gcj` object with the zero size is always an error.
146
   */
147
#  ifdef GC_GCJ_SUPPORT
148
  p->gcj_freelists[0] = MAKE_CPTR(ERROR_FL);
92,402✔
149
#  endif
150
}
92,402✔
151

152
GC_INNER void
153
GC_destroy_thread_local(GC_tlfs p)
92,122✔
154
{
155
  int kind;
156

157
  GC_ASSERT(I_HOLD_LOCK());
92,122✔
158
  GC_ASSERT(GC_getspecific(GC_thread_key) == p);
92,122✔
159
  /* We currently only do this from the thread itself. */
160
  GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
161
  for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
368,488✔
162
    if (kind == (int)GC_n_kinds) {
276,366✔
163
      /* The kind is not created. */
164
      break;
×
165
    }
166
    return_freelists(p->_freelists[kind], GC_obj_kinds[kind].ok_freelist);
276,366✔
167
  }
168
#  ifdef GC_GCJ_SUPPORT
169
  return_freelists(p->gcj_freelists, (void **)GC_gcjobjfreelist);
92,122✔
170
#  endif
171
}
92,122✔
172

173
STATIC void *
174
GC_get_tlfs(void)
200,017,554✔
175
{
176
#  if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
177
  GC_key_t k = GC_thread_key;
200,017,554✔
178

179
  if (UNLIKELY(0 == k)) {
200,017,554✔
180
    /*
181
     * We have not yet run `GC_init_parallel()`.  That means we also
182
     * are not locking, so `GC_malloc_kind_global()` is fairly cheap.
183
     */
184
    return NULL;
×
185
  }
186
  return GC_getspecific(k);
200,017,554✔
187
#  else
188
  if (UNLIKELY(!keys_initialized))
189
    return NULL;
190

191
  return GC_getspecific(GC_thread_key);
192
#  endif
193
}
194

/*
 * Allocate `lb` bytes of an object of the given `kind`, using the
 * calling thread's local free lists when possible, and falling back to
 * `GC_malloc_kind_global()` otherwise.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  size_t lg;
  void *tsd;
  void *result;

#  if MAXOBJKINDS > THREAD_FREELISTS_KINDS
  /* Kinds without a thread-local free list go straight to the global one. */
  if (UNLIKELY(kind >= THREAD_FREELISTS_KINDS))
    return GC_malloc_kind_global(lb, kind);
#  endif
  tsd = GC_get_tlfs();
  if (UNLIKELY(NULL == tsd))
    return GC_malloc_kind_global(lb, kind);

  GC_ASSERT(GC_is_initialized);
  GC_ASSERT(GC_is_thread_tsd_valid(tsd));
  lg = ALLOC_REQUEST_GRANS(lb);
#  if defined(CPPCHECK)
#    define MALLOC_KIND_PTRFREE_INIT (void *)1
#  else
#    define MALLOC_KIND_PTRFREE_INIT NULL
#  endif
  /*
   * The last argument clears the free-list link of the new object
   * unless the object is pointer-free (in which case the link word
   * need not be cleared).
   */
  GC_FAST_MALLOC_GRANS(result, lg, ((GC_tlfs)tsd)->_freelists[kind],
                       DIRECT_GRANULES, kind, GC_malloc_kind_global(lb, kind),
                       (void)(kind == PTRFREE ? MALLOC_KIND_PTRFREE_INIT
                                              : (obj_link(result) = NULL)));
#  ifdef LOG_ALLOCS
  GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                (unsigned long)lb, kind, result, (unsigned long)GC_gc_no);
#  endif
  return result;
}
228

#  ifdef GC_GCJ_SUPPORT

#    include "gc/gc_gcj.h"

/*
 * Thread-local variant of the `gcj`-style allocation: allocate `lb` bytes
 * and store `vtable_ptr` in the first word of the result.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_gcj_malloc(size_t lb, const void *vtable_ptr)
{
  void *result;
  void **tiny_fl;
  size_t lg;

  /*
   * Unlike the other thread-local allocation calls, we assume that the
   * collector has been explicitly initialized.
   */
  GC_ASSERT(GC_gcjobjfreelist != NULL);
#    if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
  GC_ASSERT(keys_initialized);
#    else
  GC_ASSERT(GC_thread_key != 0);
#    endif

  /*
   * `gcj`-style allocation without locks is extremely tricky.
   * The fundamental issue is that we may end up marking a free list,
   * which has free-list links instead of "vtable" pointers.
   * That is usually OK, since the next object on the free list will be
   * cleared, and will thus be interpreted as containing a zero descriptor.
   * That is fine if the object has not yet been initialized.  But there
   * are interesting potential races.  In the case of incremental
   * collection, this seems hopeless, since the marker may run
   * asynchronously, and may pick up the pointer to the next free-list
   * entry (which it thinks is a "vtable" pointer), get suspended for
   * a while, and then see an allocated object instead of the "vtable".
   * This may be avoidable with either a handshake with the collector or,
   * probably more easily, by moving the free list links to the second
   * "pointer-sized" word of each object.  The latter is not a universal
   * win, since on architecture like Itanium, nonzero offsets are not
   * necessarily free.  And there may be cache fill order issues.
   * For now, we punt with the incremental collection.  This probably means
   * that the incremental collection should be enabled before we create
   * a second thread.
   */
  if (UNLIKELY(GC_incremental))
    return GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */);

  tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists;
  lg = ALLOC_REQUEST_GRANS(lb);

  /*
   * The provided `default_expr` below forces the initialization of the
   * "vtable" pointer.  This is necessary to ensure some very subtle
   * properties required if a garbage collection is run in the middle of
   * such an allocation.  Here we implicitly also assume atomicity for the
   * free list and method pointer assignments.  We must update the free list
   * before we store the pointer.  Otherwise a collection at this point
   * would see a corrupted free list.  A real memory barrier is not needed,
   * since the action of stopping this thread will cause prior writes
   * to complete.  We assert that any concurrent marker will stop us.
   * Thus it is impossible for a mark procedure to see the allocation of the
   * next object, but to see this object still containing a free-list pointer.
   * Otherwise the marker, by misinterpreting the free-list link as a "vtable"
   * pointer, might find a random "mark descriptor" in the next object.
   */
  GC_FAST_MALLOC_GRANS(
      result, lg, tiny_fl, DIRECT_GRANULES, GC_gcj_kind,
      GC_core_gcj_malloc(lb, vtable_ptr, 0 /* `flags` */), do {
        AO_compiler_barrier();
        *(const void **)result = vtable_ptr;
      } while (0));
  return result;
}

#  endif /* GC_GCJ_SUPPORT */
303

304
GC_INNER void
305
GC_mark_thread_local_fls_for(GC_tlfs p)
253,014✔
306
{
307
  ptr_t q;
308
  int kind, j;
309

310
  for (j = 0; j < GC_TINY_FREELISTS; ++j) {
6,578,364✔
311
    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
25,301,400✔
312
      /*
313
       * Load the pointer atomically as it might be updated concurrently
314
       * by `GC_FAST_MALLOC_GRANS()`.
315
       */
316
      q = GC_cptr_load((volatile ptr_t *)&p->_freelists[kind][j]);
18,976,050✔
317
      if (ADDR(q) > HBLKSIZE)
18,976,050✔
318
        GC_set_fl_marks(q);
1,624,163✔
319
    }
320
#  ifdef GC_GCJ_SUPPORT
321
    if (LIKELY(j > 0)) {
6,325,350✔
322
      q = GC_cptr_load((volatile ptr_t *)&p->gcj_freelists[j]);
6,072,336✔
323
      if (ADDR(q) > HBLKSIZE)
6,072,336✔
324
        GC_set_fl_marks(q);
×
325
    }
326
#  endif
327
  }
328
}
253,014✔
329

#  if defined(GC_ASSERTIONS)
/* Check that all thread-local free-lists in `p` are completely marked. */
void
GC_check_tls_for(GC_tlfs p)
{
  int i, kind;

  /* The checks are independent, so iterate kind-major here. */
  for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
    for (i = 1; i < GC_TINY_FREELISTS; ++i) {
      GC_check_fl_marks(&p->_freelists[kind][i]);
    }
  }
#    ifdef GC_GCJ_SUPPORT
  for (i = 1; i < GC_TINY_FREELISTS; ++i) {
    GC_check_fl_marks(&p->gcj_freelists[i]);
  }
#    endif
}
#  endif
347

348
#endif /* THREAD_LOCAL_ALLOC */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc