• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bdwgc / bdwgc / 2053

22 Feb 2026 05:48AM UTC coverage: 77.233% (+3.3%) from 73.894%
2053

push

travis-ci

ivmai
Fix missing GC_ATTR_NONNULL for API functions
(fix of commit d012f92c)

* include/gc/gc.h (GC_exclude_static_roots, GC_add_roots): Add
`GC_ATTR_NONNULL` attribute for the appropriate arguments.
* include/gc/gc.h [GC_WIN32_THREADS && (!GC_PTHREADS || GC_BUILD
|| GC_WINDOWS_H_INCLUDED) && (!GC_NO_THREAD_DECLS || GC_BUILD)
&& !GC_DONT_INCL_WINDOWS_H] (GC_CreateThread, GC_beginthreadex):
Likewise.
* include/gc/gc_inline.h (GC_generic_malloc_many): Likewise.
* include/gc/gc_mark.h (GC_mark_and_push, GC_new_proc,
GC_new_proc_inner): Likewise.
* include/gc/gc_pthread_redirects.h [GC_PTHREADS
&& !GC_PTHREAD_REDIRECTS_ONLY] (GC_pthread_create): Likewise.
* include/private/gc_priv.h (NONNULL_PROC_NOT_ZERO): New macro.
* mallocx.c (GC_generic_malloc_many): Add assertion that `result` is
non-null.
* mark_rts.c (GC_add_roots_inner): Add assertion that `b` is non-null.
* mark_rts.c (GC_exclude_static_roots_inner): Add assertion that
`start` is non-null.
* misc.c (GC_new_proc_inner): Add assertion that `proc` is non-zero.
* pthread_support.c (GC_wrap_pthread_create): Add assertion that
`start_routine` is non-zero.

3 of 3 new or added lines in 2 files covered. (100.0%)

128 existing lines in 9 files now uncovered.

6873 of 8899 relevant lines covered (77.23%)

17354920.53 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

85.38
/mallocx.c
1
/*
2
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
6
 * Copyright (c) 2009-2025 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 */
17

18
#include "private/gc_priv.h"
19

20
/*
21
 * These are extra allocation routines that are likely to be less
22
 * frequently used than those in `malloc.c` file.  They are separate in
23
 * the hope that the `.o` file will be excluded from statically linked
24
 * executables.  We should probably break this up further.
25
 */
26

27
#include <string.h>
28

29
#ifndef MSWINCE
30
#  include <errno.h>
31
#endif
32

33
/*
 * Some externally visible but unadvertised variables to allow access
 * to free lists from inlined allocators without including the
 * `gc_priv.h` file or introducing dependencies on internal data
 * structure layouts.
 */
38
#include "private/gc_alloc_ptrs.h"
39
void **const GC_objfreelist_ptr = GC_objfreelist;
40
void **const GC_aobjfreelist_ptr = GC_aobjfreelist;
41
void **const GC_uobjfreelist_ptr = GC_uobjfreelist;
42
#ifdef GC_ATOMIC_UNCOLLECTABLE
43
void **const GC_auobjfreelist_ptr = GC_auobjfreelist;
44
#endif
45

46
GC_API int GC_CALL
47
GC_get_kind_and_size(const void *p, size_t *psize)
1,260,504✔
48
{
49
  const hdr *hhdr = HDR(p);
1,260,504✔
50

51
  if (psize != NULL) {
1,260,504✔
52
    *psize = hhdr->hb_sz;
630,252✔
53
  }
54
  return hhdr->hb_obj_kind;
1,260,504✔
55
}
56

57
GC_API GC_ATTR_MALLOC void *GC_CALL
58
GC_generic_or_special_malloc(size_t lb, int kind)
3,130,504✔
59
{
60
  switch (kind) {
3,130,504✔
61
  case PTRFREE:
3,130,378✔
62
  case NORMAL:
63
    return GC_malloc_kind(lb, kind);
3,130,378✔
64
  case UNCOLLECTABLE:
126✔
65
#ifdef GC_ATOMIC_UNCOLLECTABLE
66
  case AUNCOLLECTABLE:
67
#endif
68
    return GC_generic_malloc_uncollectable(lb, kind);
126✔
69
  default:
×
70
    return GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */, 0);
×
71
  }
72
}
73

74
/*
 * Change the size of the object pointed to by `p` to `lb` bytes,
 * preserving its contents (up to the smaller of the old and new sizes).
 * `GC_realloc(NULL, lb)` behaves as `GC_malloc(lb)`; `GC_realloc(p, 0)`
 * frees `p` (unless `IGNORE_FREE`) and returns `NULL`.  The returned
 * pointer may differ from `p`, in which case the old object is freed.
 */
GC_API void *GC_CALL
GC_realloc(void *p, size_t lb)
{
  hdr *hhdr;
  void *result;
#if defined(_FORTIFY_SOURCE) && defined(__GNUC__) && !defined(__clang__)
  /*
   * Use `cleared_p` instead of `p` as a workaround to avoid passing
   * `alloc_size(lb)` attribute associated with `p` to `memset`
   * (including a `memset` call inside `GC_free`).
   */
  volatile GC_uintptr_t cleared_p = (GC_uintptr_t)p;
#else
#  define cleared_p p
#endif
  size_t sz;      /*< current size in bytes */
  size_t orig_sz; /*< original `sz` (in bytes) */
  int obj_kind;

  if (NULL == p) {
    /* Required by ANSI. */
    return GC_malloc(lb);
  }
  if (0 == lb) /* `&& p != NULL` */ {
#ifndef IGNORE_FREE
    GC_free(p);
#endif
    return NULL;
  }
  /* Fetch the current size and kind from the (first) block header. */
  hhdr = HDR(HBLKPTR(p));
  sz = hhdr->hb_sz;
  obj_kind = hhdr->hb_obj_kind;
  orig_sz = sz;

  if (sz > MAXOBJBYTES) {
    /* A large (multi-block) object: update its header in place. */
    const struct obj_kind *ok = &GC_obj_kinds[obj_kind];
    word descr = ok->ok_descriptor;

    /* Round it up to the next whole heap block. */
    sz = (sz + HBLKSIZE - 1) & ~(HBLKSIZE - 1);
#if ALIGNMENT > GC_DS_TAGS
    /*
     * An extra byte is not added in case of ignore-off-page allocated
     * objects not smaller than `HBLKSIZE`.
     */
    GC_ASSERT(sz >= HBLKSIZE);
    if (EXTRA_BYTES != 0 && (hhdr->hb_flags & IGNORE_OFF_PAGE) != 0
        && obj_kind == NORMAL)
      descr += ALIGNMENT; /*< or set to 0 */
#endif
    if (ok->ok_relocate_descr) {
      descr += sz;
    }

    /*
     * `GC_realloc` might be changing the block size while
     * `GC_reclaim_block` or `GC_clear_hdr_marks` is examining it.
     * The change to the size field is benign, in that `GC_reclaim`
     * (and `GC_clear_hdr_marks`) would work correctly with either
     * value, since we are not changing the number of objects in
     * the block.  But seeing a half-updated value (though unlikely
     * to occur in practice) could be probably bad.
     * Using unordered atomic accesses on `hb_sz` and `hb_descr`
     * fields would solve the issue.  (The alternate solution might
     * be to initially overallocate large objects, so we do not
     * have to adjust the size in `GC_realloc`, if they still fit.
     * But that is probably more expensive, since we may end up
     * scanning a bunch of zeros during the collection.)
     */
#ifdef AO_HAVE_store
    AO_store(&hhdr->hb_sz, sz);
    AO_store((AO_t *)&hhdr->hb_descr, descr);
#else
    {
      /* No atomic store available: serialize with the allocator lock. */
      LOCK();
      hhdr->hb_sz = sz;
      hhdr->hb_descr = descr;
      UNLOCK();
    }
#endif

#ifdef MARK_BIT_PER_OBJ
    GC_ASSERT(hhdr->hb_inv_sz == LARGE_INV_SZ);
#else
    GC_ASSERT((hhdr->hb_flags & LARGE_BLOCK) != 0
              && hhdr->hb_map[ANY_INDEX] == 1);
#endif
    if (IS_UNCOLLECTABLE(obj_kind))
      GC_non_gc_bytes += (sz - orig_sz);
    /* Extra area is already cleared by `GC_alloc_large_and_clear`. */
  }
  if (ADD_EXTRA_BYTES(lb) <= sz) {
    /* The new size still fits into the current allocation. */
    if (lb >= (sz >> 1)) {
      /* Shrinking by less than half (or growing within slack): reuse. */
      if (orig_sz > lb) {
        /* Clear unneeded part of object to avoid bogus pointer tracing. */
        BZERO((ptr_t)cleared_p + lb, orig_sz - lb);
      }
      return p;
    }
    /*
     * Shrink it.  Note: shrinking of large blocks is not implemented
     * efficiently.
     */
    sz = lb;
  }
  /* Allocate a new object of the same kind and move the data. */
  result = GC_generic_or_special_malloc((word)lb, obj_kind);
  if (LIKELY(result != NULL)) {
    /*
     * In case of shrink, it could also return original object.
     * But this gives the client warning of imminent disaster.
     */
    BCOPY(p, result, sz);
#ifndef IGNORE_FREE
    GC_free((ptr_t)cleared_p);
#endif
  }
  return result;
#undef cleared_p
}
193

194
GC_API void *GC_CALL
195
GC_reallocf(void *p, size_t lb)
126✔
196
{
197
  void *result = GC_realloc(p, lb);
126✔
198

199
#ifndef IGNORE_FREE
200
  if (UNLIKELY(NULL == result))
126✔
201
    GC_free(p);
×
202
#endif
203
  return result;
126✔
204
}
205

206
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)
207
#  ifdef REDIRECT_MALLOC_DEBUG
208
#    define REDIRECT_REALLOC_F GC_debug_realloc_replacement
209
#    define REDIRECT_REALLOCF_F GC_debug_reallocf_replacement
210
/* As with `malloc`, avoid two levels of extra calls here. */
211
#    define GC_debug_realloc_replacement(p, lb) \
212
      GC_debug_realloc(p, lb, GC_DBG_EXTRAS)
213
#    define GC_debug_reallocf_replacement(p, lb) \
214
      GC_debug_reallocf(p, lb, GC_DBG_EXTRAS)
215
#  else
216
#    define REDIRECT_REALLOC_F GC_realloc
217
#    define REDIRECT_REALLOCF_F GC_reallocf
218
#  endif
219

220
void *
221
realloc(void *p, size_t lb)
222
{
223
  return REDIRECT_REALLOC_F(p, lb);
224
}
225

226
void *
227
reallocf(void *p, size_t lb)
228
{
229
  return REDIRECT_REALLOCF_F(p, lb);
230
}
231

232
#  undef GC_debug_realloc_replacement
233
#  undef GC_debug_reallocf_replacement
234
#endif
235

236
GC_API GC_ATTR_MALLOC void *GC_CALL
237
GC_generic_malloc_ignore_off_page(size_t lb, int kind)
630,000✔
238
{
239
  return GC_generic_malloc_aligned(lb, kind, IGNORE_OFF_PAGE,
630,000✔
240
                                   0 /* `align_m1` */);
241
}
242

243
GC_API GC_ATTR_MALLOC void *GC_CALL
244
GC_malloc_ignore_off_page(size_t lb)
630,000✔
245
{
246
  return GC_generic_malloc_aligned(lb, NORMAL, IGNORE_OFF_PAGE, 0);
630,000✔
247
}
248

249
GC_API GC_ATTR_MALLOC void *GC_CALL
250
GC_malloc_atomic_ignore_off_page(size_t lb)
630,002✔
251
{
252
  return GC_generic_malloc_aligned(lb, PTRFREE, IGNORE_OFF_PAGE, 0);
630,002✔
253
}
254

255
/*
 * Increment `GC_bytes_allocd` from code that does not have direct access
 * to `GC_arrays`.
 */
void GC_CALL
GC_incr_bytes_allocd(size_t n)
{
  /* NOTE(review): no locking here — presumably the caller holds the
     allocator lock; confirm against callers. */
  GC_bytes_allocd += n;
}
264

265
/* The same as `GC_incr_bytes_allocd` but for `GC_bytes_freed`. */
void GC_CALL
GC_incr_bytes_freed(size_t n)
{
  /* NOTE(review): unsynchronized update, as in `GC_incr_bytes_allocd`. */
  GC_bytes_freed += n;
}
271

272
GC_API size_t GC_CALL
273
GC_get_expl_freed_bytes_since_gc(void)
2,858,800✔
274
{
275
  return (size_t)GC_bytes_freed;
2,858,800✔
276
}
277

278
#ifdef PARALLEL_MARK
279
static void
280
acquire_mark_lock_notify_builders(void)
718,060✔
281
{
282
  GC_acquire_mark_lock();
718,060✔
283
  --GC_fl_builder_count;
718,060✔
284
  if (0 == GC_fl_builder_count)
718,060✔
285
    GC_notify_all_builder();
591,505✔
286
  GC_release_mark_lock();
718,060✔
287
}
718,060✔
288
#endif
289

290
/*
 * Allocate roughly a heap block worth of objects, each of size
 * `lb_adjusted` (already granule-aligned, including extra bytes) and
 * of the given kind, linking them through `obj_link` and storing the
 * head of the list into `*result` (`NULL` on failure).  Tries, in
 * order: a page awaiting reclamation, a prefix of the global free
 * list, a freshly allocated heap block, and finally a single object.
 */
GC_API void GC_CALL
GC_generic_malloc_many(size_t lb_adjusted, int kind, void **result)
{
  void *op;
  void *p;
  void **opp;
  /* The value of `lb_adjusted` converted to granules. */
  size_t lg;
  word my_bytes_allocd = 0;
  struct obj_kind *ok;
  struct hblk **rlh;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  GC_ASSERT(NONNULL_ARG_NOT_NULL(result));
  GC_ASSERT(lb_adjusted != 0 && (lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  /* Currently a single object is always allocated if manual VDB. */
  /*
   * TODO: `GC_dirty` should be called for each linked object (but the
   * last one) to support multiple objects allocation.
   */
  if (UNLIKELY(lb_adjusted > MAXOBJBYTES) || GC_manual_vdb) {
    /* Degenerate case: return a single-object "list". */
    op = GC_generic_malloc_aligned(lb_adjusted - EXTRA_BYTES, kind,
                                   0 /* `flags` */, 0 /* `align_m1` */);
    if (LIKELY(op != NULL))
      obj_link(op) = NULL;
    *result = op;
#ifndef NO_MANUAL_VDB
    if (GC_manual_vdb && GC_is_heap_ptr(result)) {
      GC_dirty_inner(result);
      REACHABLE_AFTER_DIRTY(op);
    }
#endif
    return;
  }

  GC_ASSERT(kind < MAXOBJKINDS);
  lg = BYTES_TO_GRANULES(lb_adjusted);
  if (UNLIKELY(get_have_errors()))
    GC_print_all_errors();
  GC_notify_or_invoke_finalizers();
  GC_DBG_COLLECT_AT_MALLOC(lb_adjusted - EXTRA_BYTES);

  LOCK();
  /* Do our share of marking work. */
  if (GC_incremental && !GC_dont_gc) {
    GC_collect_a_little_inner(1);
  }

  /* First see if we can reclaim a page of objects waiting to be reclaimed. */
  ok = &GC_obj_kinds[kind];
  rlh = ok->ok_reclaim_list;
  if (rlh != NULL) {
    struct hblk *hbp;
    hdr *hhdr;

    while ((hbp = rlh[lg]) != NULL) {
      /* Pop the block off the reclaim list for this size/kind. */
      hhdr = HDR(hbp);
      rlh[lg] = hhdr->hb_next;
      GC_ASSERT(hhdr->hb_sz == lb_adjusted);
      hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
#ifdef PARALLEL_MARK
      if (GC_parallel) {
        /* Flush the shared allocation counter into `GC_bytes_allocd`. */
        GC_signed_word my_bytes_allocd_tmp
            = (GC_signed_word)AO_load(&GC_bytes_allocd_tmp);
        GC_ASSERT(my_bytes_allocd_tmp >= 0);
        /*
         * We only decrement it while holding the allocator lock.
         * Thus, we cannot accidentally adjust it down in more than
         * one thread simultaneously.
         */
        if (my_bytes_allocd_tmp != 0) {
          (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                 (AO_t)(-my_bytes_allocd_tmp));
          GC_bytes_allocd += (word)my_bytes_allocd_tmp;
        }
        /*
         * Register ourselves as a free-list builder, then drop the
         * allocator lock so reclamation can run without it.
         */
        GC_acquire_mark_lock();
        ++GC_fl_builder_count;
        UNLOCK();
        GC_release_mark_lock();

        op = GC_reclaim_generic(hbp, hhdr, lb_adjusted, ok->ok_init, NULL,
                                &my_bytes_allocd);
        if (op != NULL) {
          *result = op;
          (void)AO_fetch_and_add(&GC_bytes_allocd_tmp, (AO_t)my_bytes_allocd);
          GC_acquire_mark_lock();
          --GC_fl_builder_count;
          if (0 == GC_fl_builder_count)
            GC_notify_all_builder();
#  ifdef THREAD_SANITIZER
          GC_release_mark_lock();
          LOCK();
          GC_bytes_found += (GC_signed_word)my_bytes_allocd;
          UNLOCK();
#  else
          /* The resulting `GC_bytes_found` may be inaccurate. */
          GC_bytes_found += (GC_signed_word)my_bytes_allocd;
          GC_release_mark_lock();
#  endif
          (void)GC_clear_stack(NULL);
          return;
        }

        acquire_mark_lock_notify_builders();

        /*
         * The allocator lock is needed for access to the reclaim list.
         * We must decrement `GC_fl_builder_count` before reacquiring
         * the allocator lock.  Hopefully this path is rare.
         */
        LOCK();
        rlh = ok->ok_reclaim_list; /*< reload `rlh` after locking */
        if (UNLIKELY(NULL == rlh))
          break;
        continue;
      }
#endif

      /* Non-parallel path: reclaim while holding the allocator lock. */
      op = GC_reclaim_generic(hbp, hhdr, lb_adjusted, ok->ok_init, NULL,
                              &my_bytes_allocd);
      if (op != NULL) {
        /* We also reclaimed memory, so we need to adjust that count. */
        GC_bytes_found += (GC_signed_word)my_bytes_allocd;
        GC_bytes_allocd += my_bytes_allocd;
        *result = op;
        UNLOCK();
        (void)GC_clear_stack(NULL);
        return;
      }
    }
  }

  /*
   * Next try to use prefix of global free list if there is one.
   * We do not refill it, but we need to use it up before allocating
   * a new block ourselves.
   */
  opp = &ok->ok_freelist[lg];
  op = *opp;
  if (op != NULL) {
    *opp = NULL;
    my_bytes_allocd = 0;
    /* Take at most ~one heap block worth of objects off the list. */
    for (p = op; p != NULL; p = obj_link(p)) {
      my_bytes_allocd += lb_adjusted;
      if ((word)my_bytes_allocd >= HBLKSIZE) {
        /* Put the remainder back on the global free list. */
        *opp = obj_link(p);
        obj_link(p) = NULL;
        break;
      }
    }
    GC_bytes_allocd += my_bytes_allocd;

  } else {
    /* Next try to allocate a new block worth of objects of this size. */
    struct hblk *h
        = GC_allochblk(lb_adjusted, kind, 0 /* `flags` */, 0 /* `align_m1` */);

    if (h /* `!= NULL` */) { /*< CPPCHECK */
      if (IS_UNCOLLECTABLE(kind))
        GC_set_hdr_marks(HDR(h));
      GC_bytes_allocd += HBLKSIZE - (HBLKSIZE % lb_adjusted);
#ifdef PARALLEL_MARK
      if (GC_parallel) {
        /* Build the free list without holding the allocator lock. */
        GC_acquire_mark_lock();
        ++GC_fl_builder_count;
        UNLOCK();
        GC_release_mark_lock();

        op = GC_build_fl(h, NULL, lg, ok->ok_init || GC_debugging_started);
        *result = op;

        acquire_mark_lock_notify_builders();
        (void)GC_clear_stack(NULL);
        return;
      }
#endif

      op = GC_build_fl(h, NULL, lg, ok->ok_init || GC_debugging_started);
    } else {
      /*
       * As a last attempt, try allocating a single object.
       * Note that this may trigger a collection or expand the heap.
       */
      op = GC_generic_malloc_inner(lb_adjusted - EXTRA_BYTES, kind,
                                   0 /* `flags` */);
      if (op != NULL)
        obj_link(op) = NULL;
    }
  }

  *result = op;
  UNLOCK();
  (void)GC_clear_stack(NULL);
}
485

486
GC_API GC_ATTR_MALLOC void *GC_CALL
487
GC_malloc_many(size_t lb)
635,315✔
488
{
489
  void *result;
490
  size_t lg, lb_adjusted;
491

492
  if (UNLIKELY(0 == lb))
635,315✔
493
    lb = 1;
×
494
  lg = ALLOC_REQUEST_GRANS(lb);
635,315✔
495
  lb_adjusted = GRANULES_TO_BYTES(lg);
635,315✔
496
  GC_generic_malloc_many(lb_adjusted, NORMAL, &result);
635,315✔
497
  return result;
635,315✔
498
}
499

500
/*
501
 * TODO: The debugging variant of `GC_memalign` and friends is tricky
502
 * and currently missing.  The major difficulty is: `store_debug_info`
503
 * should return the pointer of the object with the requested alignment
504
 * (unlike the object header).
505
 */
506

507
GC_API GC_ATTR_MALLOC void *GC_CALL
508
GC_memalign(size_t align, size_t lb)
949✔
509
{
510
  size_t align_m1 = align - 1;
949✔
511

512
  /* Check the alignment argument. */
513
  if (UNLIKELY(0 == align || (align & align_m1) != 0))
949✔
514
    return NULL;
×
515

516
  /* TODO: Use thread-local allocation. */
517
  if (align <= GC_GRANULE_BYTES)
949✔
518
    return GC_malloc(lb);
130✔
519
  return GC_malloc_kind_aligned_global(lb, NORMAL, align_m1);
819✔
520
}
521

522
GC_API int GC_CALL
523
GC_posix_memalign(void **memptr, size_t align, size_t lb)
63✔
524
{
525
  void *p;
526
  size_t align_minus_one = align - 1; /*< to workaround a cppcheck warning */
63✔
527

528
  /* Check alignment properly. */
529
  if (UNLIKELY(align < sizeof(void *) || (align_minus_one & align) != 0)) {
63✔
530
#ifdef MSWINCE
531
    return ERROR_INVALID_PARAMETER;
532
#else
533
    return EINVAL;
×
534
#endif
535
  }
536

537
  p = GC_memalign(align, lb);
63✔
538
  if (UNLIKELY(NULL == p)) {
63✔
539
#ifdef MSWINCE
540
    return ERROR_NOT_ENOUGH_MEMORY;
541
#else
542
    return ENOMEM;
×
543
#endif
544
  }
545
  *memptr = p;
63✔
546
  return 0; /*< success */
63✔
547
}
548

549
#ifndef GC_NO_VALLOC
550
GC_API GC_ATTR_MALLOC void *GC_CALL
551
GC_valloc(size_t lb)
63✔
552
{
553
  if (UNLIKELY(!GC_is_initialized))
63✔
554
    GC_init();
×
555
  GC_ASSERT(GC_real_page_size != 0);
63✔
556
  return GC_memalign(GC_real_page_size, lb);
63✔
557
}
558

559
GC_API GC_ATTR_MALLOC void *GC_CALL
560
GC_pvalloc(size_t lb)
63✔
561
{
562
  if (UNLIKELY(!GC_is_initialized))
63✔
563
    GC_init();
×
564
  GC_ASSERT(GC_real_page_size != 0);
63✔
565
  lb = SIZET_SAT_ADD(lb, GC_real_page_size - 1) & ~(GC_real_page_size - 1);
63✔
566
  return GC_memalign(GC_real_page_size, lb);
63✔
567
}
568
#endif /* !GC_NO_VALLOC */
569

570
GC_API GC_ATTR_MALLOC char *GC_CALL
571
GC_strdup(const char *s)
45,615✔
572
{
573
  /*
574
   * Implementation of a variant of `strdup()` that uses the collector
575
   * to allocate a copy of the string.
576
   */
577
  char *copy;
578
  size_t lb;
579
  if (s == NULL)
45,615✔
580
    return NULL;
×
581
  lb = strlen(s) + 1;
45,615✔
582
  copy = (char *)GC_malloc_atomic(lb);
45,615✔
583
  if (UNLIKELY(NULL == copy)) {
45,615✔
584
#ifndef MSWINCE
585
    errno = ENOMEM;
×
586
#endif
587
    return NULL;
×
588
  }
589
  BCOPY(s, copy, lb);
45,615✔
590
  return copy;
45,615✔
591
}
592

593
GC_API GC_ATTR_MALLOC char *GC_CALL
594
GC_strndup(const char *str, size_t size)
63✔
595
{
596
  char *copy;
597
  /* Note: `str` is expected to be non-`NULL`. */
598
  size_t len = strlen(str);
63✔
599
  if (UNLIKELY(len > size))
63✔
600
    len = size;
63✔
601
  copy = (char *)GC_malloc_atomic(len + 1);
63✔
602
  if (UNLIKELY(NULL == copy)) {
63✔
603
#ifndef MSWINCE
604
    errno = ENOMEM;
×
605
#endif
606
    return NULL;
×
607
  }
608
  if (LIKELY(len > 0))
63✔
609
    BCOPY(str, copy, len);
63✔
610
  copy[len] = '\0';
63✔
611
  return copy;
63✔
612
}
613

614
#ifdef GC_REQUIRE_WCSDUP
615
#  include <wchar.h> /*< for `wcslen()` */
616

617
GC_API GC_ATTR_MALLOC wchar_t *GC_CALL
618
GC_wcsdup(const wchar_t *str)
63✔
619
{
620
  size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
63✔
621
  wchar_t *copy = (wchar_t *)GC_malloc_atomic(lb);
63✔
622

623
  if (UNLIKELY(NULL == copy)) {
63✔
624
#  ifndef MSWINCE
625
    errno = ENOMEM;
×
626
#  endif
627
    return NULL;
×
628
  }
629
  BCOPY(str, copy, lb);
63✔
630
  return copy;
63✔
631
}
632

633
#  if !defined(wcsdup) && defined(REDIRECT_MALLOC) \
634
      && !defined(REDIRECT_MALLOC_IN_HEADER)
635
wchar_t *
636
wcsdup(const wchar_t *str)
637
{
638
  return GC_wcsdup(str);
639
}
640
#  endif
641
#endif /* GC_REQUIRE_WCSDUP */
642

643
#ifndef CPPCHECK
/*
 * Historical "stubborn object" API kept for compatibility: allocation
 * is now just a plain `GC_malloc`.
 */
GC_API void *GC_CALL
GC_malloc_stubborn(size_t lb)
{
  return GC_malloc(lb);
}

/* Compatibility stub: marking an object as changeable is now a no-op. */
GC_API void GC_CALL
GC_change_stubborn(const void *p)
{
  UNUSED_ARG(p);
}
#endif /* !CPPCHECK */
656

657
/* Mark the object containing `p` as dirty for the dirty-bits tracking. */
GC_API void GC_CALL
GC_end_stubborn_change(const void *p)
{
  GC_dirty(p); /*< entire object */
}
662

663
/*
 * Store pointer `q` into `*p` and record the write so that the
 * dirty-bits (VDB) machinery notices the modified location.
 */
GC_API void GC_CALL
GC_ptr_store_and_dirty(void *p, const void *q)
{
  *(const void **)p = q;
  GC_dirty(p);
  REACHABLE_AFTER_DIRTY(q);
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc