bdwgc / bdwgc, build 1976 (push, travis-ci, ivmai)

22 Dec 2025 04:35AM UTC, coverage: 73.579% (+0.6%) from 72.991%

Control redirection internally using REDIRECT_MALLOC_DEBUG/UNCOLLECTABLE
(refactoring)

This commit replaces the `REDIRECT_MALLOC`, `REDIRECT_REALLOC` and
`REDIRECT_FREE` control macros as follows (see the sketch after the
commit message):
- `-D REDIRECT_MALLOC=GC_malloc` to `-D REDIRECT_MALLOC`;
- `-D REDIRECT_MALLOC=GC_malloc_uncollectable` to
  `-D REDIRECT_MALLOC_UNCOLLECTABLE`;
- `-D REDIRECT_MALLOC=GC_debug_malloc_replacement`,
  `-D REDIRECT_REALLOC=GC_debug_realloc_replacement`,
  `-D REDIRECT_FREE=GC_debug_free` to `-D REDIRECT_MALLOC_DEBUG`;
- `-D REDIRECT_MALLOC=GC_debug_malloc_uncollectable_replacement`,
  `-D REDIRECT_REALLOC=GC_debug_realloc_replacement`,
  `-D REDIRECT_FREE=GC_debug_free` to
  `-D REDIRECT_MALLOC_DEBUG -D REDIRECT_MALLOC_UNCOLLECTABLE`.

* CMakeLists.txt [enable_redirect_malloc]: Define `REDIRECT_MALLOC`
C macro (to the default value instead of `GC_debug_malloc_replacement`,
`GC_debug_malloc_uncollectable_replacement`, `GC_malloc_uncollectable`,
`GC_malloc`).
* build.zig [enable_redirect_malloc] (build): Likewise.
* configure.ac [$enable_redirect_malloc==yes]: Likewise.
* CMakeLists.txt [enable_redirect_malloc && enable_gc_debug]: Define
`REDIRECT_MALLOC_DEBUG` C macro; add comment.
* build.zig [enable_redirect_malloc && enable_gc_debug] (build):
Likewise.
* configure.ac [$enable_redirect_malloc==yes && $enable_gc_debug==yes]:
Likewise.
* CMakeLists.txt [enable_redirect_malloc
&& enable_uncollectable_redirection]: Define
`REDIRECT_MALLOC_UNCOLLECTABLE` C macro; add comment.
* build.zig [enable_redirect_malloc
&& enable_uncollectable_redirection] (build): Likewise.
* configure.ac [$enable_redirect_malloc==yes
&& $enable_uncollectable_redirection==yes]: Likewise.
* CMakeLists.txt [enable_redirect_malloc && enable_gc_debug]
(REDIRECT_REALLOC, REDIRECT_FREE): Do not define C macro.
* Makefile.direct (CFLAGS): Change `-D REDIRECT_MALLOC=GC_malloc` to
`-D REDIRECT_MALLOC` in comment.
* dbg_mlc.c [USE_PROC_FOR_LIBRARIES] (GC_debug_free): Check
`REDIRECT_MALLOC` instead of `R... (continued)

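The new macros are plain on/off switches; the collector selects the concrete
allocation functions internally. A minimal sketch of that selection logic is
below, assuming helper names `REDIRECT_MALLOC_F` and `REDIRECT_FREE_F` for
illustration (of these, only `REDIRECT_REALLOC_F` actually appears in the
`mallocx.c` listing further down), so the real header logic may differ:

#ifdef REDIRECT_MALLOC
#  ifdef REDIRECT_MALLOC_DEBUG
     /* Debug-checked redirection. */
#    ifdef REDIRECT_MALLOC_UNCOLLECTABLE
#      define REDIRECT_MALLOC_F GC_debug_malloc_uncollectable_replacement
#    else
#      define REDIRECT_MALLOC_F GC_debug_malloc_replacement
#    endif
#    define REDIRECT_REALLOC_F GC_debug_realloc_replacement
#    define REDIRECT_FREE_F GC_debug_free
#  else
     /* Plain (non-debug) redirection. */
#    ifdef REDIRECT_MALLOC_UNCOLLECTABLE
#      define REDIRECT_MALLOC_F GC_malloc_uncollectable
#    else
#      define REDIRECT_MALLOC_F GC_malloc
#    endif
#    define REDIRECT_REALLOC_F GC_realloc
#    define REDIRECT_FREE_F GC_free
#  endif
#endif
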
6511 of 8849 relevant lines covered (73.58%)

13,949,936.61 hits per line

Source file: /mallocx.c (85.43% covered)

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2009-2025 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * These are extra allocation routines that are likely to be less
 * frequently used than those in the `malloc.c` file.  They are separate
 * in the hope that the `.o` file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <string.h>

#ifndef MSWINCE
#  include <errno.h>
#endif

/*
 * Some externally visible but unadvertised variables to allow access
 * to free lists from inlined allocators without including the
 * `gc_priv.h` file or introducing dependencies on internal data
 * structure layouts.
 */
#include "private/gc_alloc_ptrs.h"
void **const GC_objfreelist_ptr = GC_objfreelist;
void **const GC_aobjfreelist_ptr = GC_aobjfreelist;
void **const GC_uobjfreelist_ptr = GC_uobjfreelist;
#ifdef GC_ATOMIC_UNCOLLECTABLE
void **const GC_auobjfreelist_ptr = GC_auobjfreelist;
#endif

GC_API int GC_CALL
GC_get_kind_and_size(const void *p, size_t *psize)
{
  const hdr *hhdr = HDR(p);

  if (psize != NULL) {
    *psize = hhdr->hb_sz;
  }
  return hhdr->hb_obj_kind;
}

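/*
 * Illustrative usage sketch (an editor's example, not part of
 * `mallocx.c`): given the address of a collector-allocated object,
 * `GC_get_kind_and_size` reports the object kind and, optionally, the
 * size the collector actually reserved, which may exceed the requested
 * size because of granule rounding.
 */
static void
example_query_object(void)
{
  void *p = GC_malloc(10);
  size_t sz = 0;
  int kind = GC_get_kind_and_size(p, &sz);

  /* `sz` is at least the 10 bytes requested; `kind` identifies the */
  /* object kind (e.g. normal vs. pointer-free).                    */
  (void)kind;
  (void)sz;
}
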
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_or_special_malloc(size_t lb, int kind)
{
  switch (kind) {
  case PTRFREE:
  case NORMAL:
    return GC_malloc_kind(lb, kind);
  case UNCOLLECTABLE:
#ifdef GC_ATOMIC_UNCOLLECTABLE
  case AUNCOLLECTABLE:
#endif
    return GC_generic_malloc_uncollectable(lb, kind);
  default:
    return GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */, 0);
  }
}

GC_API void *GC_CALL
GC_realloc(void *p, size_t lb)
{
  hdr *hhdr;
  void *result;
#if defined(_FORTIFY_SOURCE) && defined(__GNUC__) && !defined(__clang__)
  /*
   * Use `cleared_p` instead of `p` as a workaround to avoid passing
   * `alloc_size(lb)` attribute associated with `p` to `memset`
   * (including a `memset` call inside `GC_free`).
   */
  volatile GC_uintptr_t cleared_p = (GC_uintptr_t)p;
#else
#  define cleared_p p
#endif
  size_t sz;      /*< current size in bytes */
  size_t orig_sz; /*< original `sz` (in bytes) */
  int obj_kind;

  if (NULL == p) {
    /* Required by ANSI. */
    return GC_malloc(lb);
  }
  if (0 == lb) /* `&& p != NULL` */ {
#ifndef IGNORE_FREE
    GC_free(p);
#endif
    return NULL;
  }
  hhdr = HDR(HBLKPTR(p));
  sz = hhdr->hb_sz;
  obj_kind = hhdr->hb_obj_kind;
  orig_sz = sz;

  if (sz > MAXOBJBYTES) {
    const struct obj_kind *ok = &GC_obj_kinds[obj_kind];
    word descr = ok->ok_descriptor;

    /* Round it up to the next whole heap block. */
    sz = (sz + HBLKSIZE - 1) & ~(HBLKSIZE - 1);
#if ALIGNMENT > GC_DS_TAGS
    /*
     * An extra byte is not added in case of ignore-off-page allocated
     * objects not smaller than `HBLKSIZE`.
     */
    GC_ASSERT(sz >= HBLKSIZE);
    if (EXTRA_BYTES != 0 && (hhdr->hb_flags & IGNORE_OFF_PAGE) != 0
        && obj_kind == NORMAL)
      descr += ALIGNMENT; /*< or set to 0 */
#endif
    if (ok->ok_relocate_descr) {
      descr += sz;
    }

    /*
     * `GC_realloc` might be changing the block size while
     * `GC_reclaim_block` or `GC_clear_hdr_marks` is examining it.
     * The change to the size field is benign, in that `GC_reclaim`
     * (and `GC_clear_hdr_marks`) would work correctly with either
     * value, since we are not changing the number of objects in
     * the block.  But seeing a half-updated value (though unlikely
     * to occur in practice) could probably be bad.
     * Using unordered atomic accesses on `hb_sz` and `hb_descr`
     * fields would solve the issue.  (The alternate solution might
     * be to initially overallocate large objects, so we do not
     * have to adjust the size in `GC_realloc`, if they still fit.
     * But that is probably more expensive, since we may end up
     * scanning a bunch of zeros during the collection.)
     */
#ifdef AO_HAVE_store
    AO_store(&hhdr->hb_sz, sz);
    AO_store((AO_t *)&hhdr->hb_descr, descr);
#else
    {
      LOCK();
      hhdr->hb_sz = sz;
      hhdr->hb_descr = descr;
      UNLOCK();
    }
#endif

#ifdef MARK_BIT_PER_OBJ
    GC_ASSERT(hhdr->hb_inv_sz == LARGE_INV_SZ);
#else
    GC_ASSERT((hhdr->hb_flags & LARGE_BLOCK) != 0
              && hhdr->hb_map[ANY_INDEX] == 1);
#endif
    if (IS_UNCOLLECTABLE(obj_kind))
      GC_non_gc_bytes += (sz - orig_sz);
    /* Extra area is already cleared by `GC_alloc_large_and_clear`. */
  }
  if (ADD_EXTRA_BYTES(lb) <= sz) {
    if (lb >= (sz >> 1)) {
      if (orig_sz > lb) {
        /* Clear unneeded part of object to avoid bogus pointer tracing. */
        BZERO((ptr_t)cleared_p + lb, orig_sz - lb);
      }
      return p;
    }
    /*
     * Shrink it.  Note: shrinking of large blocks is not implemented
     * efficiently.
     */
    sz = lb;
  }
  result = GC_generic_or_special_malloc((word)lb, obj_kind);
  if (LIKELY(result != NULL)) {
    /*
     * In case of shrink, it could also return the original object, but
     * this gives the client a warning of imminent disaster.
     */
    BCOPY(p, result, sz);
#ifndef IGNORE_FREE
    GC_free((ptr_t)cleared_p);
#endif
  }
  return result;
#undef cleared_p
}

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)
#  ifdef REDIRECT_MALLOC_DEBUG
#    define REDIRECT_REALLOC_F GC_debug_realloc_replacement
/* As with `malloc`, avoid two levels of extra calls here. */
#    define GC_debug_realloc_replacement(p, lb) \
      GC_debug_realloc(p, lb, GC_DBG_EXTRAS)
#  else
#    define REDIRECT_REALLOC_F GC_realloc
#  endif

void *
realloc(void *p, size_t lb)
{
  return REDIRECT_REALLOC_F(p, lb);
}

#  undef GC_debug_realloc_replacement
#endif

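/*
 * Illustrative usage sketch (an editor's example, not part of
 * `mallocx.c`): when the library is built with `-D REDIRECT_MALLOC`
 * (and `REDIRECT_MALLOC_IN_HEADER` is not used), the `realloc` wrapper
 * above shadows the C library version, so ordinary client code is
 * serviced by `GC_realloc` or, with `REDIRECT_MALLOC_DEBUG`, by its
 * debug-checked variant.
 */
static char *
example_grow_buffer(char *buf, size_t new_len)
{
  /* Resolves to the `realloc` defined above in a redirected build. */
  return (char *)realloc(buf, new_len);
}
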
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc_ignore_off_page(size_t lb, int kind)
{
  return GC_generic_malloc_aligned(lb, kind, IGNORE_OFF_PAGE,
                                   0 /* `align_m1` */);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_ignore_off_page(size_t lb)
{
  return GC_generic_malloc_aligned(lb, NORMAL, IGNORE_OFF_PAGE, 0);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic_ignore_off_page(size_t lb)
{
  return GC_generic_malloc_aligned(lb, PTRFREE, IGNORE_OFF_PAGE, 0);
}

/*
 * Increment `GC_bytes_allocd` from code that does not have direct access
 * to `GC_arrays`.
 */
void GC_CALL
GC_incr_bytes_allocd(size_t n)
{
  GC_bytes_allocd += n;
}

/* The same as `GC_incr_bytes_allocd` but for `GC_bytes_freed`. */
void GC_CALL
GC_incr_bytes_freed(size_t n)
{
  GC_bytes_freed += n;
}

GC_API size_t GC_CALL
GC_get_expl_freed_bytes_since_gc(void)
{
  return (size_t)GC_bytes_freed;
}

#ifdef PARALLEL_MARK
static void
acquire_mark_lock_notify_builders(void)
{
  GC_acquire_mark_lock();
  --GC_fl_builder_count;
  if (0 == GC_fl_builder_count)
    GC_notify_all_builder();
  GC_release_mark_lock();
}
#endif

GC_API void GC_CALL
GC_generic_malloc_many(size_t lb_adjusted, int kind, void **result)
{
  void *op;
  void *p;
  void **opp;
  /* The value of `lb_adjusted` converted to granules. */
  size_t lg;
  word my_bytes_allocd = 0;
  struct obj_kind *ok;
  struct hblk **rlh;

  GC_ASSERT(lb_adjusted != 0 && (lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  /* Currently a single object is always allocated if manual VDB. */
  /*
   * TODO: `GC_dirty` should be called for each linked object (except
   * the last one) to support multiple-object allocation.
   */
  if (UNLIKELY(lb_adjusted > MAXOBJBYTES) || GC_manual_vdb) {
    op = GC_generic_malloc_aligned(lb_adjusted - EXTRA_BYTES, kind,
                                   0 /* `flags` */, 0 /* `align_m1` */);
    if (LIKELY(op != NULL))
      obj_link(op) = NULL;
    *result = op;
#ifndef NO_MANUAL_VDB
    if (GC_manual_vdb && GC_is_heap_ptr(result)) {
      GC_dirty_inner(result);
      REACHABLE_AFTER_DIRTY(op);
    }
#endif
    return;
  }

  GC_ASSERT(kind < MAXOBJKINDS);
  lg = BYTES_TO_GRANULES(lb_adjusted);
  if (UNLIKELY(get_have_errors()))
    GC_print_all_errors();
  GC_notify_or_invoke_finalizers();
  GC_DBG_COLLECT_AT_MALLOC(lb_adjusted - EXTRA_BYTES);
  if (UNLIKELY(!GC_is_initialized))
    GC_init();

  LOCK();
  /* Do our share of marking work. */
  if (GC_incremental && !GC_dont_gc) {
    GC_collect_a_little_inner(1);
  }

  /* First see if we can reclaim a page of objects waiting to be reclaimed. */
  ok = &GC_obj_kinds[kind];
  rlh = ok->ok_reclaim_list;
  if (rlh != NULL) {
    struct hblk *hbp;
    hdr *hhdr;

    while ((hbp = rlh[lg]) != NULL) {
      hhdr = HDR(hbp);
      rlh[lg] = hhdr->hb_next;
      GC_ASSERT(hhdr->hb_sz == lb_adjusted);
      hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
#ifdef PARALLEL_MARK
      if (GC_parallel) {
        GC_signed_word my_bytes_allocd_tmp
            = (GC_signed_word)AO_load(&GC_bytes_allocd_tmp);
        GC_ASSERT(my_bytes_allocd_tmp >= 0);
        /*
         * We only decrement it while holding the allocator lock.
         * Thus, we cannot accidentally adjust it down in more than
         * one thread simultaneously.
         */
        if (my_bytes_allocd_tmp != 0) {
          (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                 (AO_t)(-my_bytes_allocd_tmp));
          GC_bytes_allocd += (word)my_bytes_allocd_tmp;
        }
        GC_acquire_mark_lock();
        ++GC_fl_builder_count;
        UNLOCK();
        GC_release_mark_lock();

        op = GC_reclaim_generic(hbp, hhdr, lb_adjusted, ok->ok_init, NULL,
                                &my_bytes_allocd);
        if (op != NULL) {
          *result = op;
          (void)AO_fetch_and_add(&GC_bytes_allocd_tmp, (AO_t)my_bytes_allocd);
          GC_acquire_mark_lock();
          --GC_fl_builder_count;
          if (0 == GC_fl_builder_count)
            GC_notify_all_builder();
#  ifdef THREAD_SANITIZER
          GC_release_mark_lock();
          LOCK();
          GC_bytes_found += (GC_signed_word)my_bytes_allocd;
          UNLOCK();
#  else
          /* The resulting `GC_bytes_found` may be inaccurate. */
          GC_bytes_found += (GC_signed_word)my_bytes_allocd;
          GC_release_mark_lock();
#  endif
          (void)GC_clear_stack(NULL);
          return;
        }

        acquire_mark_lock_notify_builders();

        /*
         * The allocator lock is needed for access to the reclaim list.
         * We must decrement `GC_fl_builder_count` before reacquiring
         * the allocator lock.  Hopefully this path is rare.
         */
        LOCK();
        rlh = ok->ok_reclaim_list; /*< reload `rlh` after locking */
        if (UNLIKELY(NULL == rlh))
          break;
        continue;
      }
#endif

      op = GC_reclaim_generic(hbp, hhdr, lb_adjusted, ok->ok_init, NULL,
                              &my_bytes_allocd);
      if (op != NULL) {
        /* We also reclaimed memory, so we need to adjust that count. */
        GC_bytes_found += (GC_signed_word)my_bytes_allocd;
        GC_bytes_allocd += my_bytes_allocd;
        *result = op;
        UNLOCK();
        (void)GC_clear_stack(NULL);
        return;
      }
    }
  }

  /*
   * Next try to use a prefix of the global free list if there is one.
   * We do not refill it, but we need to use it up before allocating
   * a new block ourselves.
   */
  opp = &ok->ok_freelist[lg];
  op = *opp;
  if (op != NULL) {
    *opp = NULL;
    my_bytes_allocd = 0;
    for (p = op; p != NULL; p = obj_link(p)) {
      my_bytes_allocd += lb_adjusted;
      if ((word)my_bytes_allocd >= HBLKSIZE) {
        *opp = obj_link(p);
        obj_link(p) = NULL;
        break;
      }
    }
    GC_bytes_allocd += my_bytes_allocd;

  } else {
    /* Next try to allocate a new block worth of objects of this size. */
    struct hblk *h
        = GC_allochblk(lb_adjusted, kind, 0 /* `flags` */, 0 /* `align_m1` */);

    if (h /* `!= NULL` */) { /*< CPPCHECK */
      if (IS_UNCOLLECTABLE(kind))
        GC_set_hdr_marks(HDR(h));
      GC_bytes_allocd += HBLKSIZE - (HBLKSIZE % lb_adjusted);
#ifdef PARALLEL_MARK
      if (GC_parallel) {
        GC_acquire_mark_lock();
        ++GC_fl_builder_count;
        UNLOCK();
        GC_release_mark_lock();

        op = GC_build_fl(h, NULL, lg, ok->ok_init || GC_debugging_started);
        *result = op;

        acquire_mark_lock_notify_builders();
        (void)GC_clear_stack(NULL);
        return;
      }
#endif

      op = GC_build_fl(h, NULL, lg, ok->ok_init || GC_debugging_started);
    } else {
      /*
       * As a last attempt, try allocating a single object.
       * Note that this may trigger a collection or expand the heap.
       */
      op = GC_generic_malloc_inner(lb_adjusted - EXTRA_BYTES, kind,
                                   0 /* `flags` */);
      if (op != NULL)
        obj_link(op) = NULL;
    }
  }

  *result = op;
  UNLOCK();
  (void)GC_clear_stack(NULL);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_many(size_t lb)
{
  void *result;
  size_t lg, lb_adjusted;

  if (UNLIKELY(0 == lb))
    lb = 1;
  lg = ALLOC_REQUEST_GRANS(lb);
  lb_adjusted = GRANULES_TO_BYTES(lg);
  GC_generic_malloc_many(lb_adjusted, NORMAL, &result);
  return result;
}

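/*
 * Illustrative usage sketch (an editor's example, not part of
 * `mallocx.c`): `GC_malloc_many` returns a list of objects of the
 * requested size, chained through the first pointer-sized word of each
 * object (the same link that `obj_link()` manipulates above).  The
 * client unlinks the objects one at a time and should clear the link
 * word before using an object.
 */
static void
example_consume_many(void)
{
  void *list = GC_malloc_many(16);

  while (list != NULL) {
    void *next = *(void **)list; /*< fetch the link first */

    *(void **)list = NULL; /*< clear the link word */
    /* ... use the 16-byte object at `list` ... */
    list = next;
  }
}
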
/*
 * TODO: The debugging variant of `GC_memalign` and friends is tricky
 * and currently missing.  The major difficulty is that `store_debug_info`
 * should return a pointer to the object with the requested alignment
 * (not to the object header).
 */

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_memalign(size_t align, size_t lb)
{
  size_t align_m1 = align - 1;

  /* Check the alignment argument. */
  if (UNLIKELY(0 == align || (align & align_m1) != 0))
    return NULL;

  /* TODO: Use thread-local allocation. */
  if (align <= GC_GRANULE_BYTES)
    return GC_malloc(lb);
  return GC_malloc_kind_aligned_global(lb, NORMAL, align_m1);
}

GC_API int GC_CALL
GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
  void *p;
  size_t align_minus_one = align - 1; /*< to work around a cppcheck warning */

  /* Check alignment properly. */
  if (UNLIKELY(align < sizeof(void *) || (align_minus_one & align) != 0)) {
#ifdef MSWINCE
    return ERROR_INVALID_PARAMETER;
#else
    return EINVAL;
#endif
  }

  p = GC_memalign(align, lb);
  if (UNLIKELY(NULL == p)) {
#ifdef MSWINCE
    return ERROR_NOT_ENOUGH_MEMORY;
#else
    return ENOMEM;
#endif
  }
  *memptr = p;
  return 0; /*< success */
}

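/*
 * Illustrative usage sketch (an editor's example, not part of
 * `mallocx.c`): request a collectable object on a 64-byte boundary.
 * As with POSIX `posix_memalign`, the alignment must be a power of two
 * and a multiple of `sizeof(void *)`; 0 is returned on success, an
 * error code otherwise (Windows CE builds return the corresponding
 * `ERROR_*` codes instead).
 */
static void *
example_alloc_cache_aligned(size_t lb)
{
  void *p = NULL;

  if (GC_posix_memalign(&p, 64, lb) != 0)
    return NULL;
  /* Here `p` is 64-byte aligned and traced by the collector. */
  return p;
}
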
#ifndef GC_NO_VALLOC
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_valloc(size_t lb)
{
  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  GC_ASSERT(GC_real_page_size != 0);
  return GC_memalign(GC_real_page_size, lb);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_pvalloc(size_t lb)
{
  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  GC_ASSERT(GC_real_page_size != 0);
  lb = SIZET_SAT_ADD(lb, GC_real_page_size - 1) & ~(GC_real_page_size - 1);
  return GC_memalign(GC_real_page_size, lb);
}
#endif /* !GC_NO_VALLOC */

GC_API GC_ATTR_MALLOC char *GC_CALL
GC_strdup(const char *s)
{
  /*
   * Implementation of a variant of `strdup()` that uses the collector
   * to allocate a copy of the string.
   */
  char *copy;
  size_t lb;
  if (s == NULL)
    return NULL;
  lb = strlen(s) + 1;
  copy = (char *)GC_malloc_atomic(lb);
  if (UNLIKELY(NULL == copy)) {
#ifndef MSWINCE
    errno = ENOMEM;
#endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}

GC_API GC_ATTR_MALLOC char *GC_CALL
GC_strndup(const char *str, size_t size)
{
  char *copy;
  /* Note: `str` is expected to be non-`NULL`. */
  size_t len = strlen(str);
  if (UNLIKELY(len > size))
    len = size;
  copy = (char *)GC_malloc_atomic(len + 1);
  if (UNLIKELY(NULL == copy)) {
#ifndef MSWINCE
    errno = ENOMEM;
#endif
    return NULL;
  }
  if (LIKELY(len > 0))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}

#ifdef GC_REQUIRE_WCSDUP
#  include <wchar.h> /*< for `wcslen()` */

GC_API GC_ATTR_MALLOC wchar_t *GC_CALL
GC_wcsdup(const wchar_t *str)
{
  size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
  wchar_t *copy = (wchar_t *)GC_malloc_atomic(lb);

  if (UNLIKELY(NULL == copy)) {
#  ifndef MSWINCE
    errno = ENOMEM;
#  endif
    return NULL;
  }
  BCOPY(str, copy, lb);
  return copy;
}

#  if !defined(wcsdup) && defined(REDIRECT_MALLOC) \
      && !defined(REDIRECT_MALLOC_IN_HEADER)
wchar_t *
wcsdup(const wchar_t *str)
{
  return GC_wcsdup(str);
}
#  endif
#endif /* GC_REQUIRE_WCSDUP */

#ifndef CPPCHECK
GC_API void *GC_CALL
GC_malloc_stubborn(size_t lb)
{
  return GC_malloc(lb);
}

GC_API void GC_CALL
GC_change_stubborn(const void *p)
{
  UNUSED_ARG(p);
}
#endif /* !CPPCHECK */

GC_API void GC_CALL
GC_end_stubborn_change(const void *p)
{
  GC_dirty(p); /*< entire object */
}

GC_API void GC_CALL
GC_ptr_store_and_dirty(void *p, const void *q)
{
  *(const void **)p = q;
  GC_dirty(p);
  REACHABLE_AFTER_DIRTY(q);
}
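
/*
 * Illustrative usage sketch (an editor's example, not part of
 * `mallocx.c`): with the incremental or manual-VDB modes, a client that
 * stores a pointer into a collector-allocated object should notify the
 * collector of the write.  `GC_ptr_store_and_dirty` above bundles the
 * store with the `GC_dirty` call; `GC_end_stubborn_change` marks a
 * whole object after a series of updates.
 */
struct example_node {
  struct example_node *next;
  int value;
};

static void
example_link(struct example_node *a, struct example_node *b)
{
  /* Equivalent to `a->next = b;` followed by `GC_dirty(&a->next);`. */
  GC_ptr_store_and_dirty(&a->next, b);
}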