• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bdwgc / bdwgc / 2053

22 Feb 2026 05:48AM UTC coverage: 77.233% (+3.3%) from 73.894%
2053

push

travis-ci

ivmai
Fix missing GC_ATTR_NONNULL for API functions
(fix of commit d012f92c)

* include/gc/gc.h (GC_exclude_static_roots, GC_add_roots): Add
`GC_ATTR_NONNULL` attribute for the appropriate arguments.
* include/gc/gc.h [GC_WIN32_THREADS && (!GC_PTHREADS || GC_BUILD
|| GC_WINDOWS_H_INCLUDED) && (!GC_NO_THREAD_DECLS || GC_BUILD)
&& !GC_DONT_INCL_WINDOWS_H] (GC_CreateThread, GC_beginthreadex):
Likewise.
* include/gc/gc_inline.h (GC_generic_malloc_many): Likewise.
* include/gc/gc_mark.h (GC_mark_and_push, GC_new_proc,
GC_new_proc_inner): Likewise.
* include/gc/gc_pthread_redirects.h [GC_PTHREADS
&& !GC_PTHREAD_REDIRECTS_ONLY] (GC_pthread_create): Likewise.
* include/private/gc_priv.h (NONNULL_PROC_NOT_ZERO): New macro.
* mallocx.c (GC_generic_malloc_many): Add assertion that `result` is
non-null.
* mark_rts.c (GC_add_roots_inner): Add assertion that `b` is non-null.
* mark_rts.c (GC_exclude_static_roots_inner): Add assertion that
`start` is non-null.
* misc.c (GC_new_proc_inner): Add assertion that `proc` is non-zero.
* pthread_support.c (GC_wrap_pthread_create): Add assertion that
`start_routine` is non-zero.

3 of 3 new or added lines in 2 files covered. (100.0%)

128 existing lines in 9 files now uncovered.

6873 of 8899 relevant lines covered (77.23%)

17354920.53 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

89.49
/malloc.c
1
/*
2
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
5
 * Copyright (c) 2008-2025 Ivan Maidanski
6
 *
7
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
9
 *
10
 * Permission is hereby granted to use or copy this program
11
 * for any purpose, provided the above notices are retained on all copies.
12
 * Permission to modify the code and to distribute modified code is granted,
13
 * provided the above notices are retained, and a notice that the code was
14
 * modified is included with the above copyright notice.
15
 */
16

17
#include "private/gc_priv.h"
18

19
#include <string.h>
20

/* Allocate reclaim list for the kind.  Returns `TRUE` on success. */
STATIC GC_bool
GC_alloc_reclaim_list(struct obj_kind *ok)
{
  struct hblk **result;

  /* Caller must hold the allocator lock. */
  GC_ASSERT(I_HOLD_LOCK());
  /* One list head per small-object granule count (plus entry 0). */
  result = (struct hblk **)GC_scratch_alloc((MAXOBJGRANULES + 1)
                                            * sizeof(struct hblk *));
  if (UNLIKELY(NULL == result))
    return FALSE;

  /* Scratch memory is not guaranteed to be zeroed; clear it here. */
  BZERO(result, (MAXOBJGRANULES + 1) * sizeof(struct hblk *));
  ok->ok_reclaim_list = result;
  return TRUE;
}
37

/*
 * Allocate a large block of size `lb_adjusted` bytes with the requested
 * alignment (`align_m1 + 1`).  The block is not cleared.  We assume that
 * the size is nonzero and a multiple of `GC_GRANULE_BYTES`, and that
 * it already includes `EXTRA_BYTES` value.  The `flags` argument should
 * be `IGNORE_OFF_PAGE` or 0.  Calls `GC_allochblk()` to do the actual
 * allocation, but also triggers collection and/or heap expansion
 * as appropriate.  Updates value of `GC_bytes_allocd`; does also other
 * accounting.
 */
STATIC ptr_t
GC_alloc_large(size_t lb_adjusted, int kind, unsigned flags, size_t align_m1)
{
  /*
   * TODO: It is unclear which retries limit is sufficient (value of 3 leads
   * to fail in some 32-bit applications, 10 is a kind of arbitrary value).
   */
#define MAX_ALLOCLARGE_RETRIES 10

  int retry_cnt;
  size_t n_blocks; /*< includes alignment */
  struct hblk *h;
  ptr_t result;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(!GC_is_initialized)) {
    /* Lazy initialization: drop and retake the lock around `GC_init`. */
    UNLOCK(); /*< just to unset `GC_lock_holder` */
    GC_init();
    LOCK();
  }
  GC_ASSERT(lb_adjusted != 0 && (lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  /* Saturating add guards against overflow of size plus alignment slack. */
  n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(SIZET_SAT_ADD(lb_adjusted, align_m1));

  /* Do our share of marking work. */
  if (GC_incremental && !GC_dont_gc) {
    GC_collect_a_little_inner(n_blocks);
  }

  h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
#ifdef USE_MUNMAP
  /* Coalescing unmapped blocks may make a sufficiently large one. */
  if (NULL == h && GC_merge_unmapped()) {
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }
#endif
  for (retry_cnt = 0; NULL == h; retry_cnt++) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCLARGE_RETRIES)
      ABORT("Too many retries in GC_alloc_large");
    if (UNLIKELY(!GC_collect_or_expand(n_blocks, flags, retry_cnt > 0)))
      return NULL;
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }

  GC_bytes_allocd += lb_adjusted;
  if (lb_adjusted > HBLKSIZE) {
    /* Track the peak of multi-block ("truly large") allocations. */
    GC_large_allocd_bytes += HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted);
    if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
      GC_max_large_allocd_bytes = GC_large_allocd_bytes;
  }
  /* FIXME: Do we need some way to reset `GC_max_large_allocd_bytes`? */
  result = h->hb_body;
  GC_ASSERT((ADDR(result) & align_m1) == 0);
  return result;
}
105

106
/*
107
 * Allocate a large block of given size in bytes, clear it if appropriate.
108
 * We assume that the size is nonzero and a multiple of `GC_GRANULE_BYTES`,
109
 * and that it already includes `EXTRA_BYTES` value.  Update value of
110
 * `GC_bytes_allocd`.
111
 */
112
STATIC ptr_t
113
GC_alloc_large_and_clear(size_t lb_adjusted, int kind, unsigned flags)
740✔
114
{
115
  ptr_t result;
116

117
  GC_ASSERT(I_HOLD_LOCK());
740✔
118
  result = GC_alloc_large(lb_adjusted, kind, flags, 0 /* `align_m1` */);
740✔
119
  if (LIKELY(result != NULL)
740✔
120
      && (GC_debugging_started || GC_obj_kinds[kind].ok_init)) {
740✔
121
    /* Clear the whole block, in case of `GC_realloc` call. */
122
    BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
740✔
123
  }
124
  return result;
740✔
125
}
126

/*
 * Fill in additional entries in `GC_size_map`, including the `i`-th one.
 * Note that a filled in section of the array ending at `n` always has
 * the length of at least `n / 4`.
 */
STATIC void
GC_extend_size_map(size_t i)
{
  size_t original_lg = ALLOC_REQUEST_GRANS(i);
  size_t lg;
  /*
   * The size we try to preserve.  Close to `i`, unless this would
   * introduce too many distinct sizes.
   */
  size_t byte_sz = GRANULES_TO_BYTES(original_lg);
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  /* The lowest indexed entry we initialize. */
  size_t low_limit;
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    low_limit = byte_sz - (byte_sz >> 2); /*< much smaller than `i` */
    lg = original_lg;
    /* Skip entries that were already filled in by earlier calls. */
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    /* Grow the granule count slightly to cover a wider request range. */
    lg = ALLOC_REQUEST_GRANS(low_limit);
    lg += lg >> 3;
    if (lg < original_lg)
      lg = original_lg;
  }

  /*
   * For these larger sizes, we use an even number of granules.
   * This makes it easier to, e.g., construct a 16-byte-aligned
   * allocator even if `GC_GRANULE_BYTES` is 8.
   */
  lg = (lg + 1) & ~(size_t)1;
  if (lg > MAXOBJGRANULES)
    lg = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  GC_ASSERT(lg != 0);
  number_of_objs = HBLK_GRANULES / lg;
  GC_ASSERT(number_of_objs != 0);
  lg = (HBLK_GRANULES / number_of_objs) & ~(size_t)1;

  /*
   * We may need one extra byte; do not always fill in
   * `GC_size_map[byte_sz]`.
   */
  byte_sz = GRANULES_TO_BYTES(lg) - EXTRA_BYTES;

  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = lg;
}
189

/*
 * Allocate a small object of the given kind by popping it from the
 * kind's free list for the appropriate granule count; refills the free
 * list via `GC_allocobj` when it is empty.  The caller must hold the
 * allocator lock.  Returns `NULL` on failure.
 */
STATIC void *
GC_generic_malloc_inner_small(size_t lb, int kind)
{
  struct obj_kind *ok = &GC_obj_kinds[kind];
  size_t lg = GC_size_map[lb];
  void **opp = &ok->ok_freelist[lg];
  void *op = *opp;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == op)) {
    if (0 == lg) {
      /* `GC_size_map` has no entry for `lb` yet. */
      if (UNLIKELY(!GC_is_initialized)) {
        UNLOCK(); /*< just to unset `GC_lock_holder` */
        GC_init();
        LOCK();
        /* `GC_init` may have populated the map entry. */
        lg = GC_size_map[lb];
      }
      if (0 == lg) {
        GC_extend_size_map(lb);
        lg = GC_size_map[lb];
        GC_ASSERT(lg != 0);
      }
      /* Retry. */
      opp = &ok->ok_freelist[lg];
      op = *opp;
    }
    if (NULL == op) {
      /* The reclaim list is allocated lazily, on first use of the kind. */
      if (NULL == ok->ok_reclaim_list && !GC_alloc_reclaim_list(ok))
        return NULL;
      op = GC_allocobj(lg, kind);
      if (NULL == op)
        return NULL;
    }
  }
  /* Unlink the object and detach it from the free-list chain. */
  *opp = obj_link(op);
  obj_link(op) = NULL;
  GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  return op;
}
229

/*
 * Allocate an object of the given kind while the caller holds the
 * allocator lock; dispatches to the small-object free-list path or to
 * `GC_alloc_large_and_clear` for larger sizes.
 */
GC_INNER void *
GC_generic_malloc_inner(size_t lb, int kind, unsigned flags)
{
  size_t lb_adjusted;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb)) {
    return GC_generic_malloc_inner_small(lb, kind);
  }

#if MAX_EXTRA_BYTES > 0
  if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
    /* No need to add `EXTRA_BYTES`. */
    lb_adjusted = lb;
  } else
#endif
  /* else */ {
    lb_adjusted = ADD_EXTRA_BYTES(lb);
  }
  return GC_alloc_large_and_clear(ROUNDUP_GRANULE_SIZE(lb_adjusted), kind,
                                  flags);
}
253

#ifdef GC_COLLECT_AT_MALLOC
/*
 * Threshold (in bytes) for the "collect at malloc" debugging mode;
 * presumably consulted by `GC_DBG_COLLECT_AT_MALLOC` — see `gc_priv.h`
 * for the exact semantics (TODO: confirm).
 */
#  if defined(CPPCHECK)
/* Give static analysis a plain constant instead of the macro expansion. */
size_t GC_dbg_collect_at_malloc_min_lb = 16 * 1024; /*< some value */
#  else
size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
#  endif
#endif
261

/*
 * Allocate an object of the given kind with the requested alignment
 * (`align_m1 + 1`).  Acquires the allocator lock internally, runs
 * pending finalizer notification first, and falls back to the
 * out-of-memory handler if allocation fails.
 */
GC_INNER void *
GC_generic_malloc_aligned(size_t lb, int kind, unsigned flags, size_t align_m1)
{
  void *result;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (UNLIKELY(get_have_errors()))
    GC_print_all_errors();
  GC_notify_or_invoke_finalizers();
  GC_DBG_COLLECT_AT_MALLOC(lb);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < GC_GRANULE_BYTES)) {
    /* Granule alignment is guaranteed by the small-object free lists. */
    LOCK();
    result = GC_generic_malloc_inner_small(lb, kind);
    UNLOCK();
  } else {
#ifdef THREADS
    size_t lg;
#endif
    size_t lb_adjusted;
    GC_bool init;

#if MAX_EXTRA_BYTES > 0
    if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
      /* No need to add `EXTRA_BYTES`. */
      lb_adjusted = ROUNDUP_GRANULE_SIZE(lb);
#  ifdef THREADS
      lg = BYTES_TO_GRANULES(lb_adjusted);
#  endif
    } else
#endif
    /* else */ {
#ifndef THREADS
      size_t lg; /*< CPPCHECK */
#endif

      if (UNLIKELY(0 == lb))
        lb = 1;
      lg = ALLOC_REQUEST_GRANS(lb);
      lb_adjusted = GRANULES_TO_BYTES(lg);
    }

    init = GC_obj_kinds[kind].ok_init;
    if (LIKELY(align_m1 < GC_GRANULE_BYTES)) {
      align_m1 = 0;
    } else if (align_m1 < HBLKSIZE) {
      /* Round intermediate alignments up to the block size. */
      align_m1 = HBLKSIZE - 1;
    }
    LOCK();
    result = GC_alloc_large(lb_adjusted, kind, flags, align_m1);
    if (LIKELY(result != NULL)) {
      if (GC_debugging_started
#ifndef THREADS
          || init
#endif
      ) {
        BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
      } else {
#ifdef THREADS
        GC_ASSERT(GRANULES_TO_PTRS(lg) >= 2);
        /*
         * Clear any memory that might be used for the GC descriptors
         * before we release the allocator lock.
         */
        ((ptr_t *)result)[0] = NULL;
        ((ptr_t *)result)[1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 2] = NULL;
#endif
      }
    }
    UNLOCK();
#ifdef THREADS
    /* The bulk of the clearing is done outside the allocator lock. */
    if (init && !GC_debugging_started && result != NULL) {
      /* Clear the rest (i.e. excluding the initial 2 words). */
      BZERO((ptr_t *)result + 2,
            HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted) - 2 * sizeof(ptr_t));
    }
#endif
  }
  if (UNLIKELY(NULL == result)) {
    result = (*GC_get_oom_fn())(lb);
    /* Note: result might be misaligned. */
  }
  return result;
}
347

348
GC_API GC_ATTR_MALLOC void *GC_CALL
349
GC_generic_malloc(size_t lb, int kind)
642,999✔
350
{
351
  return GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
642,999✔
352
                                   0 /* `align_m1` */);
353
}
354

355
GC_API GC_ATTR_MALLOC void *GC_CALL
356
GC_malloc_kind_global(size_t lb, int kind)
13,289,318✔
357
{
358
  return GC_malloc_kind_aligned_global(lb, kind, 0 /* `align_m1` */);
13,289,318✔
359
}
360

/*
 * Allocate an object of the given kind with the requested alignment
 * (`align_m1 + 1`), using the global free lists where possible.
 * The fast path pops a suitably aligned small object from the kind's
 * free list under the allocator lock; otherwise falls through to
 * `GC_generic_malloc_aligned` (wrapped in `GC_clear_stack`).
 */
GC_INNER void *
GC_malloc_kind_aligned_global(size_t lb, int kind, size_t align_m1)
{
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < HBLKSIZE / 2)) {
    void *op;
    void **opp;
    size_t lg;

    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (UNLIKELY(align_m1 >= GC_GRANULE_BYTES)) {
      /* TODO: Avoid linear search. */
      /* Note: terminates on a `NULL` entry since `ADDR(NULL)` is 0. */
      for (; (ADDR(op) & align_m1) != 0; op = *opp) {
        opp = &obj_link(op);
      }
    }
    if (LIKELY(op != NULL)) {
      /* Sanity check: the successor link points into the heap (if any). */
      GC_ASSERT(PTRFREE == kind || NULL == obj_link(op)
                || (ADDR(obj_link(op)) < GC_greatest_real_heap_addr
                    && GC_least_real_heap_addr < ADDR(obj_link(op))));
      *opp = obj_link(op);
      if (kind != PTRFREE)
        obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      UNLOCK();
      GC_ASSERT((ADDR(op) & align_m1) == 0);
      return op;
    }
    UNLOCK();
  }

  /*
   * We make the `GC_clear_stack()` call a tail one, hoping to get more
   * of the stack.
   */
  return GC_clear_stack(
      GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */, align_m1));
}
403

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
/*
 * Without thread-local allocation, `GC_malloc_kind` simply forwards
 * to the global variant.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  return GC_malloc_kind_global(lb, kind);
}
#endif
411

412
GC_API GC_ATTR_MALLOC void *GC_CALL
413
GC_malloc_atomic(size_t lb)
41,131,094✔
414
{
415
  /* Allocate `lb` bytes of atomic (pointer-free) data. */
416
  return GC_malloc_kind(lb, PTRFREE);
41,131,094✔
417
}
418

419
GC_API GC_ATTR_MALLOC void *GC_CALL
420
GC_malloc(size_t lb)
155,755,732✔
421
{
422
  /* Allocate `lb` bytes of composite (pointer-containing) data. */
423
  return GC_malloc_kind(lb, NORMAL);
155,755,732✔
424
}
425

/*
 * Allocate an object of the given (uncollectible) kind.  The object's
 * mark bit is set eagerly — as the code below notes, it will not be
 * collected — and the size is accounted in `GC_non_gc_bytes` (for the
 * small-object path).
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc_uncollectable(size_t lb, int kind)
{
  void *op;
  size_t lb_orig = lb;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (EXTRA_BYTES != 0 && LIKELY(lb != 0)) {
    /*
     * We do not need the extra byte, since this will not be collected
     * anyway.
     */
    lb--;
  }

  if (SMALL_OBJ(lb)) {
    void **opp;
    size_t lg;

    if (UNLIKELY(get_have_errors()))
      GC_print_all_errors();
    GC_notify_or_invoke_finalizers();
    GC_DBG_COLLECT_AT_MALLOC(lb_orig);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (LIKELY(op != NULL)) {
      *opp = obj_link(op);
      obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      /*
       * Mark bit was already set on free list.  It will be cleared only
       * temporarily during a collection, as a result of the normal
       * free-list mark bit clearing.
       */
      GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
    } else {
      op = GC_generic_malloc_inner_small(lb, kind);
      if (NULL == op) {
        /* Read the handler before releasing the lock, then report OOM. */
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb_orig);
      }
      /* For small objects, the free lists are completely marked. */
    }
    GC_ASSERT(GC_is_marked(op));
    UNLOCK();
  } else {
    op = GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
                                   0 /* `align_m1` */);
    if (op /* `!= NULL` */) { /*< CPPCHECK */
      hdr *hhdr;

      GC_ASSERT(HBLKDISPL(op) == 0); /*< large block */
      LOCK();
      hhdr = HDR(op);
      set_mark_bit_from_hdr(hhdr, 0); /*< the only object */
#ifndef THREADS
      /*
       * This is not guaranteed in the multi-threaded case because the
       * counter could be updated before locking.
       */
      GC_ASSERT(0 == hhdr->hb_n_marks);
#endif
      hhdr->hb_n_marks = 1;
      UNLOCK();
    }
  }
  return op;
}
497

498
GC_API GC_ATTR_MALLOC void *GC_CALL
499
GC_malloc_uncollectable(size_t lb)
3,025,341✔
500
{
501
  /*
502
   * Allocate `lb` bytes of pointer-containing, traced, but not collectible
503
   * data.
504
   */
505
  return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
3,025,341✔
506
}
507

#ifdef GC_ATOMIC_UNCOLLECTABLE
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic_uncollectable(size_t lb)
{
  /* The atomic (pointer-free) analog of `GC_malloc_uncollectable`. */
  void *p = GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);

  return p;
}
#endif /* GC_ATOMIC_UNCOLLECTABLE */
515

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

#  ifndef MSWINCE
#    include <errno.h>
#  endif

/*
 * Select `REDIRECT_MALLOC_F`, the allocation primitive that the
 * redirected `malloc`, `calloc`, `strdup` and `strndup` below expand to.
 */
#  ifdef REDIRECT_MALLOC_DEBUG
#    ifndef REDIRECT_MALLOC_UNCOLLECTABLE
#      define REDIRECT_MALLOC_F GC_debug_malloc_replacement
/*
 * Avoid unnecessary nested procedure calls here, by `#define` some `malloc`
 * replacements.  Otherwise we end up saving a meaningless return address in
 * the object.  It also speeds things up, but it is admittedly quite ugly.
 */
#      define GC_debug_malloc_replacement(lb) \
        GC_debug_malloc(lb, GC_DBG_EXTRAS)
#    else
#      define REDIRECT_MALLOC_F GC_debug_malloc_uncollectable_replacement
#      define GC_debug_malloc_uncollectable_replacement(lb) \
        GC_debug_malloc_uncollectable(lb, GC_DBG_EXTRAS)
#    endif
#  elif defined(REDIRECT_MALLOC_UNCOLLECTABLE)
#    define REDIRECT_MALLOC_F GC_malloc_uncollectable
#  else
#    define REDIRECT_MALLOC_F GC_malloc
#  endif
542

/* The redirected `malloc`: forward the request to the collector. */
void *
malloc(size_t lb)
{
  /*
   * It might help to manually inline the `GC_malloc` call here.
   * But any decent compiler should reduce the extra procedure call
   * to at most a jump instruction in this case.
   */
  return REDIRECT_MALLOC_F(lb);
}
553

#  if defined(REDIR_MALLOC_AND_LINUX_THREADS)                    \
      && (defined(IGNORE_FREE) || defined(REDIRECT_MALLOC_DEBUG) \
          || !defined(REDIRECT_MALLOC_UNCOLLECTABLE))
/* Text-segment bounds of `libpthread` and the dynamic linker, used to */
/* recognize allocations/frees made on behalf of those libraries.      */
#    ifdef HAVE_LIBPTHREAD_SO
STATIC ptr_t GC_libpthread_start = NULL;
STATIC ptr_t GC_libpthread_end = NULL;
#    endif
STATIC ptr_t GC_libld_start = NULL;
STATIC ptr_t GC_libld_end = NULL;
static GC_bool lib_bounds_set = FALSE;

/*
 * Compute the library text-mapping bounds above (once).  Idempotent;
 * cancellation is disabled across the probing.
 */
GC_INNER void
GC_init_lib_bounds(void)
{
  IF_CANCEL(int cancel_state;)

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (LIKELY(lib_bounds_set))
    return;

  DISABLE_CANCEL(cancel_state);
  GC_init(); /*< if not called yet */

#    if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  LOCK(); /*< just to set `GC_lock_holder` */
#    endif
#    ifdef HAVE_LIBPTHREAD_SO
  if (!GC_text_mapping("libpthread-", &GC_libpthread_start,
                       &GC_libpthread_end)) {
    WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
    /*
     * This might still work with some versions of `libpthread`,
     * so we do not `abort`.
     */
  }
#    endif
  if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
    WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
  }
#    if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  UNLOCK();
#    endif
  RESTORE_CANCEL(cancel_state);
  lib_bounds_set = TRUE;
}
#  endif
603

/*
 * The redirected `calloc`: checks `n * lb` for overflow and forwards
 * the request to the collector.  Requests coming directly from the
 * dynamic linker (or `libpthread`) are made uncollectible, since such
 * memory may only be reachable from memory-mapped thread stacks.
 */
void *
calloc(size_t n, size_t lb)
{
  if (UNLIKELY((lb | n) > GC_SQRT_SIZE_MAX) /*< fast initial test */
      && lb && n > GC_SIZE_MAX / lb)
    return (*GC_get_oom_fn())(GC_SIZE_MAX); /*< `n * lb` overflow */
#  ifdef REDIR_MALLOC_AND_LINUX_THREADS
#    if defined(REDIRECT_MALLOC_DEBUG) \
        || !defined(REDIRECT_MALLOC_UNCOLLECTABLE)
  /*
   * The linker may allocate some memory that is only pointed to by
   * memory-mapped thread stacks.  Make sure it is not collectible.
   */
  {
    ptr_t caller = (ptr_t)__builtin_return_address(0);

    GC_init_lib_bounds();
    if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
#      ifdef HAVE_LIBPTHREAD_SO
        /*
         * Note: the two ranges are actually usually adjacent, so there
         * may be a way to speed this up.
         */
        || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
#      endif
    ) {
      return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
    }
  }
#    elif defined(IGNORE_FREE)
  /* Just to ensure `static` variables used by `free()` are initialized. */
  GC_init_lib_bounds();
#    endif
#  endif
  return REDIRECT_MALLOC_F(n * lb);
}
640

#  ifndef strdup
/*
 * The redirected `strdup`: copies `s` into GC-managed memory; sets
 * `errno` to `ENOMEM` (except on WinCE) and returns `NULL` on failure.
 */
char *
strdup(const char *s)
{
  size_t lb = strlen(s) + 1;
  char *result = (char *)REDIRECT_MALLOC_F(lb);

  if (UNLIKELY(NULL == result)) {
#    ifndef MSWINCE
    errno = ENOMEM;
#    endif
    return NULL;
  }
  /* `lb` includes the terminating NUL byte. */
  BCOPY(s, result, lb);
  return result;
}
#  else
/*
 * If `strdup` is macro defined, we assume that it actually calls `malloc`,
 * and thus the right thing will happen even without overriding it.
 * This seems to be true on most Linux systems.
 */
#  endif /* strdup */
664

#  ifndef strndup
/* This is similar to `strdup()`. */
/*
 * NOTE(review): `strlen(str)` scans for a NUL terminator before the
 * `size` clamp; POSIX `strndup` examines at most `size` bytes, so an
 * unterminated buffer of exactly `size` bytes would be over-read here —
 * confirm whether callers guarantee termination (or switch to `strnlen`
 * where available).
 */
char *
strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str);
  if (UNLIKELY(len > size))
    len = size;
  copy = (char *)REDIRECT_MALLOC_F(len + 1);
  if (UNLIKELY(NULL == copy)) {
#    ifndef MSWINCE
    errno = ENOMEM;
#    endif
    return NULL;
  }
  if (LIKELY(len > 0))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
#  endif /* !strndup */
687

688
#  undef GC_debug_malloc_replacement
689
#  undef GC_debug_malloc_uncollectable_replacement
690

691
#  ifdef REDIRECT_MALLOC_DEBUG
692
#    define REDIRECT_FREE_F GC_debug_free
693
#    define REDIRECT_FREEZERO_F GC_debug_freezero
694
#  else
695
#    define REDIRECT_FREE_F GC_free
696
#    define REDIRECT_FREEZERO_F GC_freezero
697
#  endif
698

699
void
700
free(void *p)
701
{
702
#  if defined(REDIR_MALLOC_AND_LINUX_THREADS) \
703
      && !defined(USE_PROC_FOR_LIBRARIES)     \
704
      && (defined(REDIRECT_MALLOC_DEBUG) || defined(IGNORE_FREE))
705
  /*
706
   * Do not bother with initialization checks.  If nothing has been
707
   * initialized, then the check fails, and that is safe, since we have
708
   * not allocated uncollectible objects neither.
709
   */
710
  ptr_t caller = (ptr_t)__builtin_return_address(0);
711

712
  /*
713
   * This test does not need to ensure memory visibility, since the bounds
714
   * will be set when/if we create another thread.
715
   */
716
  if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
717
#    ifdef HAVE_LIBPTHREAD_SO
718
      || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
719
#    endif
720
  ) {
721
    GC_free(p);
722
    return;
723
  }
724
#  endif
725
#  ifdef IGNORE_FREE
726
  UNUSED_ARG(p);
727
#  else
728
  REDIRECT_FREE_F(p);
729
#  endif
730
}
731

/*
 * The redirected `freezero` (a BSD extension): clear up to `clear_lb`
 * bytes of the object, then release it.  With `IGNORE_FREE` defined,
 * deallocation is skipped but the content is still cleared, clamped to
 * the object size taken from its block header.
 */
void
freezero(void *p, size_t clear_lb)
{
  /* We do not expect the caller is in `libdl` or `libpthread`. */
#  ifdef IGNORE_FREE
  if (UNLIKELY(NULL == p) || UNLIKELY(0 == clear_lb))
    return;

  LOCK();
  {
    size_t lb;
#    ifdef REDIRECT_MALLOC_DEBUG
    ptr_t base = (ptr_t)GC_base(p);

    GC_ASSERT(base != NULL);
    /* Exclude the debug header preceding the client object. */
    lb = HDR(p)->hb_sz - (size_t)((ptr_t)p - base); /*< `sizeof(oh)` */
#    else
    GC_ASSERT(GC_base(p) == p);
    lb = HDR(p)->hb_sz;
#    endif
    if (LIKELY(clear_lb > lb))
      clear_lb = lb;
  }
  /* Skip deallocation but clear the object. */
  UNLOCK();
  BZERO(p, clear_lb);
#  else
  REDIRECT_FREEZERO_F(p, clear_lb);
#  endif
}
762

/*
 * Clear the entire object and release it; implemented via `freezero`
 * with the maximum size, which is clamped to the actual object size.
 */
void
freezeroall(void *p)
{
  freezero(p, GC_SIZE_MAX);
}
768

769
#endif /* REDIRECT_MALLOC && !REDIRECT_MALLOC_IN_HEADER */
770

/*
 * Deallocate `base` (an object described by `hhdr`), clearing
 * `clear_lb` bytes starting at offset `clear_ofs` (clamped to the
 * object size).  Small objects return to the kind's free list; large
 * objects are handed back to `GC_freehblk`.  The caller must hold the
 * allocator lock.
 */
GC_INNER void
GC_free_internal(void *base, const hdr *hhdr, size_t clear_ofs,
                 size_t clear_lb)
{
  size_t lb = hhdr->hb_sz;           /*< size in bytes */
  size_t lg = BYTES_TO_GRANULES(lb); /*< size in granules */
  int kind = hhdr->hb_obj_kind;

  GC_ASSERT(I_HOLD_LOCK());
#ifdef LOG_ALLOCS
  GC_log_printf("Free %p after GC #%lu\n", base, (unsigned long)GC_gc_no);
#endif
  GC_bytes_freed += lb;
  if (IS_UNCOLLECTABLE(kind))
    GC_non_gc_bytes -= lb;

  /*
   * Ensure the part of object to clear does not overrun the object.
   * Note: `SIZET_SAT_ADD(clear_ofs, clear_lb) > lb` cannot be used instead as
   * otherwise "memset specified bound exceeds maximum object size" warning
   * (a false positive) is reported by gcc-13.
   */
  if (UNLIKELY(clear_ofs >= GC_SIZE_MAX - clear_lb)
      || UNLIKELY(clear_ofs + clear_lb > lb))
    clear_lb = lb > clear_ofs ? lb - clear_ofs : 0;

  if (LIKELY(lg <= MAXOBJGRANULES)) {
    /* The small-object case: put the object back on the free list. */
    struct obj_kind *ok = &GC_obj_kinds[kind];
    void **flh;

    if (ok->ok_init && LIKELY(lb > sizeof(ptr_t))) {
      /* Kinds needing initialization get everything past the link word. */
      clear_ofs = sizeof(ptr_t);
      clear_lb = lb - sizeof(ptr_t);
    }
    if (clear_lb > 0)
      BZERO((ptr_t)base + clear_ofs, clear_lb);

    /*
     * It is unnecessary to clear the mark bit.  If the object is reallocated,
     * it does not matter.  Otherwise, the collector will do it, since it is
     * on a free list.
     */

    flh = &ok->ok_freelist[lg];
    obj_link(base) = *flh;
    *flh = (ptr_t)base;
  } else {
    if (clear_lb > 0)
      BZERO((ptr_t)base + clear_ofs, clear_lb);
    if (lb > HBLKSIZE) {
      /* Undo the accounting done in `GC_alloc_large`. */
      GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb);
    }
    GC_ASSERT(ADDR(HBLKPTR(base)) == ADDR(hhdr->hb_block));
    GC_freehblk(hhdr->hb_block);
  }
  FREE_PROFILER_HOOK(base);
}
828

/*
 * Explicitly deallocate an object previously allocated by the
 * collector.  A `NULL` argument is silently ignored.
 */
GC_API void GC_CALL
GC_free(void *p)
{
  const hdr *hhdr;

  if (p /* `!= NULL` */) {
    /* CPPCHECK */
  } else {
    /* Required by ANSI.  It is not my fault... */
    return;
  }

  LOCK();
  hhdr = HDR(p);
#if defined(REDIRECT_MALLOC)                                           \
    && ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
        || defined(REDIR_MALLOC_AND_LINUX_THREADS) || defined(MSWIN32))
  /*
   * This might be called indirectly by `GC_print_callers` to free the
   * result of `backtrace_symbols()`.  For the other cases, this seems
   * to happen implicitly.  Do not try to deallocate that memory.
   */
  if (UNLIKELY(NULL == hhdr)) {
    UNLOCK();
    return;
  }
#endif
  GC_ASSERT(GC_base(p) == p);
  GC_free_internal(p, hhdr, 0 /* `clear_ofs` */, 0 /* `clear_lb` */);
  UNLOCK();
}
860

/*
 * Same as `GC_free` but additionally requests that `clear_lb` bytes of
 * the object content be cleared; the clearing (and clamping of
 * `clear_lb` to the object size) is performed by `GC_free_internal`.
 */
GC_API void GC_CALL
GC_freezero(void *p, size_t clear_lb)
{
  if (UNLIKELY(NULL == p))
    return;

  LOCK();
  GC_ASSERT(GC_base(p) == p);
  GC_free_internal(p, HDR(p), 0 /* `clear_ofs` */, clear_lb);
  UNLOCK();
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc