
bdwgc / bdwgc, build 1981 (push, via travis-ci, by ivmai)

29 Dec 2025 07:47PM UTC coverage: 73.055% (-0.5%) from 73.579%

Provide GC_FREEZERO() and redirect platform freezero[all]()

Issue #497 (bdwgc).

* .cspell.json: Add `FREEZERO`, `freezero`, `FREEZEROALL`,
`freezeroall` words.
* README.md (The C Interface to the Allocator, Debugging Facilities):
Mention `GC_freezero()`.
* dbg_mlc.c (debug_free_zero): New static function (move code from
`GC_debug_free()` but add `clear_lb` argument).
* dbg_mlc.c (GC_debug_free): Call `debug_free_zero()`.
* dbg_mlc.c (GC_debug_freezero): Implement.
* malloc.c (GC_freezero): Likewise.
* docs/debugging.md (Prematurely Reclaimed Objects): Update about
incorrect `GC_free` call.
* docs/leak.md: Mention `freezero` and `freezeroall`.
* docs/macros.md (REDIRECT_MALLOC, REDIRECT_MALLOC_DEBUG, IGNORE_FREE):
Likewise.
* include/gc/gc.h (GC_find_leak, GC_non_gc_bytes, GC_malloc, GC_base):
Update comment.
* include/gc/gc_disclaim.h (GC_register_disclaim_proc): Likewise.
* include/gc/gc.h (GC_freezero, GC_debug_freezero): New API function
declarations.
* include/gc/gc.h (GC_FREEZERO, GC_FREEZEROALL): New public macros (see the
usage sketch after this list).
* include/gc/leak_detector.h (freezero, freezeroall): Undefine and
redefine macros.
* include/private/gc_priv.h (GC_free_internal): Update comment.
* include/private/gc_priv.h (GC_free_internal): Append `clear_ofs` and
`clear_lb` arguments.
* malloc.c (GC_free_internal): Likewise.
* malloc.c [REDIRECT_MALLOC && !REDIRECT_MALLOC_IN_HEADER]
(REDIRECT_FREEZERO_F): New macro.
* malloc.c [REDIRECT_MALLOC && !REDIRECT_MALLOC_IN_HEADER] (freezero,
freezeroall): Implement.
* malloc.c (GC_free_internal): Call `BZERO()` if `clear_lb` is
positive.
* tests/gctest.c (test_generic_malloc_or_special, run_one_test): Call
`GC_FREEZEROALL()` instead of some `GC_FREE()` calls.
* tests/gctest.c (run_one_test): Call `GC_freezero` instead of some
`GC_free()` calls; call `GC_FREEZERO()` instead of some `GC_FREE()`
calls.
* tests/leak.c (main): Call `freezero()` and `freezeroall()`.
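
For illustration, a minimal usage sketch of the new public interface
(hypothetical application code, not part of the commit; it assumes
`GC_FREEZERO(p, sz)` and `GC_FREEZEROALL(p)` dispatch to `GC_freezero()` or
`GC_debug_freezero()` the same way `GC_FREE(p)` dispatches to `GC_free()` or
`GC_debug_free()`):

#include "gc.h"

int main(void)
{
  char *secret, *token;

  GC_INIT();
  secret = (char *)GC_MALLOC_ATOMIC(32);
  token = (char *)GC_MALLOC_ATOMIC(64);
  /* ... fill both buffers with sensitive data ... */

  /* Request zeroing of the first 32 bytes, then deallocate the object. */
  GC_FREEZERO(secret, 32);
  /* Zero the entire object (whatever its size), then deallocate it. */
  GC_FREEZEROALL(token);
  return 0;
}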

25 of 27 new or added lines in 2 files covered. (92.59%)

181 existing lines in 6 files now uncovered.

6488 of 8881 relevant lines covered (73.05%)

14037784.62 hits per line

Source File: /malloc.c (89.49% of lines covered)
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2025 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <string.h>

/* Allocate reclaim list for the kind.  Returns `TRUE` on success. */
STATIC GC_bool
GC_alloc_reclaim_list(struct obj_kind *ok)
{
  struct hblk **result;

  GC_ASSERT(I_HOLD_LOCK());
  result = (struct hblk **)GC_scratch_alloc((MAXOBJGRANULES + 1)
                                            * sizeof(struct hblk *));
  if (UNLIKELY(NULL == result))
    return FALSE;

  BZERO(result, (MAXOBJGRANULES + 1) * sizeof(struct hblk *));
  ok->ok_reclaim_list = result;
  return TRUE;
}

/*
 * Allocate a large block of size `lb_adjusted` bytes with the requested
 * alignment (`align_m1 + 1`).  The block is not cleared.  We assume that
 * the size is nonzero and a multiple of `GC_GRANULE_BYTES`, and that
 * it already includes `EXTRA_BYTES` value.  The `flags` argument should
 * be `IGNORE_OFF_PAGE` or 0.  Calls `GC_allochblk()` to do the actual
 * allocation, but also triggers collection and/or heap expansion
 * as appropriate.  Updates value of `GC_bytes_allocd`; does also other
 * accounting.
 */
STATIC ptr_t
GC_alloc_large(size_t lb_adjusted, int kind, unsigned flags, size_t align_m1)
{
  /*
   * TODO: It is unclear which retries limit is sufficient (value of 3 leads
   * to fail in some 32-bit applications, 10 is a kind of arbitrary value).
   */
#define MAX_ALLOCLARGE_RETRIES 10

  int retry_cnt;
  size_t n_blocks; /*< includes alignment */
  struct hblk *h;
  ptr_t result;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(lb_adjusted != 0 && (lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(SIZET_SAT_ADD(lb_adjusted, align_m1));
  if (UNLIKELY(!GC_is_initialized)) {
    UNLOCK(); /*< just to unset `GC_lock_holder` */
    GC_init();
    LOCK();
  }
  /* Do our share of marking work. */
  if (GC_incremental && !GC_dont_gc) {
    GC_collect_a_little_inner(n_blocks);
  }

  h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
#ifdef USE_MUNMAP
  if (NULL == h && GC_merge_unmapped()) {
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }
#endif
  for (retry_cnt = 0; NULL == h; retry_cnt++) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCLARGE_RETRIES)
      ABORT("Too many retries in GC_alloc_large");
    if (UNLIKELY(!GC_collect_or_expand(n_blocks, flags, retry_cnt > 0)))
      return NULL;
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }

  GC_bytes_allocd += lb_adjusted;
  if (lb_adjusted > HBLKSIZE) {
    GC_large_allocd_bytes += HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted);
    if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
      GC_max_large_allocd_bytes = GC_large_allocd_bytes;
  }
  /* FIXME: Do we need some way to reset `GC_max_large_allocd_bytes`? */
  result = h->hb_body;
  GC_ASSERT((ADDR(result) & align_m1) == 0);
  return result;
}

/*
 * Allocate a large block of given size in bytes, clear it if appropriate.
 * We assume that the size is nonzero and a multiple of `GC_GRANULE_BYTES`,
 * and that it already includes `EXTRA_BYTES` value.  Update value of
 * `GC_bytes_allocd`.
 */
STATIC ptr_t
GC_alloc_large_and_clear(size_t lb_adjusted, int kind, unsigned flags)
{
  ptr_t result;

  GC_ASSERT(I_HOLD_LOCK());
  result = GC_alloc_large(lb_adjusted, kind, flags, 0 /* `align_m1` */);
  if (LIKELY(result != NULL)
      && (GC_debugging_started || GC_obj_kinds[kind].ok_init)) {
    /* Clear the whole block, in case of `GC_realloc` call. */
    BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
  }
  return result;
}

/*
 * Fill in additional entries in `GC_size_map`, including the `i`-th one.
 * Note that a filled in section of the array ending at `n` always has
 * the length of at least `n / 4`.
 */
STATIC void
GC_extend_size_map(size_t i)
{
  size_t original_lg = ALLOC_REQUEST_GRANS(i);
  size_t lg;
  /*
   * The size we try to preserve.  Close to `i`, unless this would
   * introduce too many distinct sizes.
   */
  size_t byte_sz = GRANULES_TO_BYTES(original_lg);
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  /* The lowest indexed entry we initialize. */
  size_t low_limit;
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    low_limit = byte_sz - (byte_sz >> 2); /*< much smaller than `i` */
    lg = original_lg;
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    lg = ALLOC_REQUEST_GRANS(low_limit);
    lg += lg >> 3;
    if (lg < original_lg)
      lg = original_lg;
  }

  /*
   * For these larger sizes, we use an even number of granules.
   * This makes it easier to, e.g., construct a 16-byte-aligned
   * allocator even if `GC_GRANULE_BYTES` is 8.
   */
  lg = (lg + 1) & ~(size_t)1;
  if (lg > MAXOBJGRANULES)
    lg = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  GC_ASSERT(lg != 0);
  number_of_objs = HBLK_GRANULES / lg;
  GC_ASSERT(number_of_objs != 0);
  lg = (HBLK_GRANULES / number_of_objs) & ~(size_t)1;

  /*
   * We may need one extra byte; do not always fill in
   * `GC_size_map[byte_sz]`.
   */
  byte_sz = GRANULES_TO_BYTES(lg) - EXTRA_BYTES;

  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = lg;
}

STATIC void *
GC_generic_malloc_inner_small(size_t lb, int kind)
{
  struct obj_kind *ok = &GC_obj_kinds[kind];
  size_t lg = GC_size_map[lb];
  void **opp = &ok->ok_freelist[lg];
  void *op = *opp;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == op)) {
    if (0 == lg) {
      if (UNLIKELY(!GC_is_initialized)) {
        UNLOCK(); /*< just to unset `GC_lock_holder` */
        GC_init();
        LOCK();
        lg = GC_size_map[lb];
      }
      if (0 == lg) {
        GC_extend_size_map(lb);
        lg = GC_size_map[lb];
        GC_ASSERT(lg != 0);
      }
      /* Retry. */
      opp = &ok->ok_freelist[lg];
      op = *opp;
    }
    if (NULL == op) {
      if (NULL == ok->ok_reclaim_list && !GC_alloc_reclaim_list(ok))
        return NULL;
      op = GC_allocobj(lg, kind);
      if (NULL == op)
        return NULL;
    }
  }
  *opp = obj_link(op);
  obj_link(op) = NULL;
  GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  return op;
}

GC_INNER void *
GC_generic_malloc_inner(size_t lb, int kind, unsigned flags)
{
  size_t lb_adjusted;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb)) {
    return GC_generic_malloc_inner_small(lb, kind);
  }

#if MAX_EXTRA_BYTES > 0
  if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
    /* No need to add `EXTRA_BYTES`. */
    lb_adjusted = lb;
  } else
#endif
  /* else */ {
    lb_adjusted = ADD_EXTRA_BYTES(lb);
  }
  return GC_alloc_large_and_clear(ROUNDUP_GRANULE_SIZE(lb_adjusted), kind,
                                  flags);
}

#ifdef GC_COLLECT_AT_MALLOC
#  if defined(CPPCHECK)
size_t GC_dbg_collect_at_malloc_min_lb = 16 * 1024; /*< some value */
#  else
size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
#  endif
#endif

GC_INNER void *
GC_generic_malloc_aligned(size_t lb, int kind, unsigned flags, size_t align_m1)
{
  void *result;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (UNLIKELY(get_have_errors()))
    GC_print_all_errors();
  GC_notify_or_invoke_finalizers();
  GC_DBG_COLLECT_AT_MALLOC(lb);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < GC_GRANULE_BYTES)) {
    LOCK();
    result = GC_generic_malloc_inner_small(lb, kind);
    UNLOCK();
  } else {
#ifdef THREADS
    size_t lg;
#endif
    size_t lb_adjusted;
    GC_bool init;

#if MAX_EXTRA_BYTES > 0
    if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
      /* No need to add `EXTRA_BYTES`. */
      lb_adjusted = ROUNDUP_GRANULE_SIZE(lb);
#  ifdef THREADS
      lg = BYTES_TO_GRANULES(lb_adjusted);
#  endif
    } else
#endif
    /* else */ {
#ifndef THREADS
      size_t lg; /*< CPPCHECK */
#endif

      if (UNLIKELY(0 == lb))
        lb = 1;
      lg = ALLOC_REQUEST_GRANS(lb);
      lb_adjusted = GRANULES_TO_BYTES(lg);
    }

    init = GC_obj_kinds[kind].ok_init;
    if (LIKELY(align_m1 < GC_GRANULE_BYTES)) {
      align_m1 = 0;
    } else if (align_m1 < HBLKSIZE) {
      align_m1 = HBLKSIZE - 1;
    }
    LOCK();
    result = GC_alloc_large(lb_adjusted, kind, flags, align_m1);
    if (LIKELY(result != NULL)) {
      if (GC_debugging_started
#ifndef THREADS
          || init
#endif
      ) {
        BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
      } else {
#ifdef THREADS
        GC_ASSERT(GRANULES_TO_PTRS(lg) >= 2);
        /*
         * Clear any memory that might be used for the GC descriptors
         * before we release the allocator lock.
         */
        ((ptr_t *)result)[0] = NULL;
        ((ptr_t *)result)[1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 2] = NULL;
#endif
      }
    }
    UNLOCK();
#ifdef THREADS
    if (init && !GC_debugging_started && result != NULL) {
      /* Clear the rest (i.e. excluding the initial 2 words). */
      BZERO((ptr_t *)result + 2,
            HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted) - 2 * sizeof(ptr_t));
    }
#endif
  }
  if (UNLIKELY(NULL == result)) {
    result = (*GC_get_oom_fn())(lb);
    /* Note: result might be misaligned. */
  }
  return result;
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc(size_t lb, int kind)
{
  return GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
                                   0 /* `align_m1` */);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind_global(size_t lb, int kind)
{
  return GC_malloc_kind_aligned_global(lb, kind, 0 /* `align_m1` */);
}

GC_INNER void *
GC_malloc_kind_aligned_global(size_t lb, int kind, size_t align_m1)
{
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < HBLKSIZE / 2)) {
    void *op;
    void **opp;
    size_t lg;

    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (UNLIKELY(align_m1 >= GC_GRANULE_BYTES)) {
      /* TODO: Avoid linear search. */
      for (; (ADDR(op) & align_m1) != 0; op = *opp) {
        opp = &obj_link(op);
      }
    }
    if (LIKELY(op != NULL)) {
      GC_ASSERT(PTRFREE == kind || NULL == obj_link(op)
                || (ADDR(obj_link(op)) < GC_greatest_real_heap_addr
                    && GC_least_real_heap_addr < ADDR(obj_link(op))));
      *opp = obj_link(op);
      if (kind != PTRFREE)
        obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      UNLOCK();
      GC_ASSERT((ADDR(op) & align_m1) == 0);
      return op;
    }
    UNLOCK();
  }

  /*
   * We make the `GC_clear_stack()` call a tail one, hoping to get more
   * of the stack.
   */
  return GC_clear_stack(
      GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */, align_m1));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  return GC_malloc_kind_global(lb, kind);
}
#endif

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic(size_t lb)
{
  /* Allocate `lb` bytes of atomic (pointer-free) data. */
  return GC_malloc_kind(lb, PTRFREE);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc(size_t lb)
{
  /* Allocate `lb` bytes of composite (pointer-containing) data. */
  return GC_malloc_kind(lb, NORMAL);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc_uncollectable(size_t lb, int kind)
{
  void *op;
  size_t lb_orig = lb;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (EXTRA_BYTES != 0 && LIKELY(lb != 0)) {
    /*
     * We do not need the extra byte, since this will not be collected
     * anyway.
     */
    lb--;
  }

  if (SMALL_OBJ(lb)) {
    void **opp;
    size_t lg;

    if (UNLIKELY(get_have_errors()))
      GC_print_all_errors();
    GC_notify_or_invoke_finalizers();
    GC_DBG_COLLECT_AT_MALLOC(lb_orig);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (LIKELY(op != NULL)) {
      *opp = obj_link(op);
      obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      /*
       * Mark bit was already set on free list.  It will be cleared only
       * temporarily during a collection, as a result of the normal
       * free-list mark bit clearing.
       */
      GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
    } else {
      op = GC_generic_malloc_inner_small(lb, kind);
      if (NULL == op) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb_orig);
      }
      /* For small objects, the free lists are completely marked. */
    }
    GC_ASSERT(GC_is_marked(op));
    UNLOCK();
  } else {
    op = GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
                                   0 /* `align_m1` */);
    if (op /* `!= NULL` */) { /*< CPPCHECK */
      hdr *hhdr;

      GC_ASSERT(HBLKDISPL(op) == 0); /*< large block */
      LOCK();
      hhdr = HDR(op);
      set_mark_bit_from_hdr(hhdr, 0); /*< the only object */
#ifndef THREADS
      /*
       * This is not guaranteed in the multi-threaded case because the
       * counter could be updated before locking.
       */
      GC_ASSERT(0 == hhdr->hb_n_marks);
#endif
      hhdr->hb_n_marks = 1;
      UNLOCK();
    }
  }
  return op;
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_uncollectable(size_t lb)
{
  /*
   * Allocate `lb` bytes of pointer-containing, traced, but not collectible
   * data.
   */
  return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
}

#ifdef GC_ATOMIC_UNCOLLECTABLE
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic_uncollectable(size_t lb)
{
  return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
}
#endif /* GC_ATOMIC_UNCOLLECTABLE */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

#  ifndef MSWINCE
#    include <errno.h>
#  endif

#  ifdef REDIRECT_MALLOC_DEBUG
#    ifndef REDIRECT_MALLOC_UNCOLLECTABLE
#      define REDIRECT_MALLOC_F GC_debug_malloc_replacement
/*
 * Avoid unnecessary nested procedure calls here, by `#define` some `malloc`
 * replacements.  Otherwise we end up saving a meaningless return address in
 * the object.  It also speeds things up, but it is admittedly quite ugly.
 */
#      define GC_debug_malloc_replacement(lb) \
        GC_debug_malloc(lb, GC_DBG_EXTRAS)
#    else
#      define REDIRECT_MALLOC_F GC_debug_malloc_uncollectable_replacement
#      define GC_debug_malloc_uncollectable_replacement(lb) \
        GC_debug_malloc_uncollectable(lb, GC_DBG_EXTRAS)
#    endif
#  elif defined(REDIRECT_MALLOC_UNCOLLECTABLE)
#    define REDIRECT_MALLOC_F GC_malloc_uncollectable
#  else
#    define REDIRECT_MALLOC_F GC_malloc
#  endif

void *
malloc(size_t lb)
{
  /*
   * It might help to manually inline the `GC_malloc` call here.
   * But any decent compiler should reduce the extra procedure call
   * to at most a jump instruction in this case.
   */
#  if defined(SOLARIS) && defined(THREADS) && defined(I386)
  /*
   * Thread initialization can call `malloc` before we are ready for it.
   * It is not clear that this is enough to help matters.  The thread
   * implementation may well call `malloc` at other inopportune times.
   */
  if (UNLIKELY(!GC_is_initialized))
    return sbrk(lb);
#  endif
  return (void *)REDIRECT_MALLOC_F(lb);
}

#  ifdef REDIR_MALLOC_AND_LINUX_THREADS
#    ifdef HAVE_LIBPTHREAD_SO
STATIC ptr_t GC_libpthread_start = NULL;
STATIC ptr_t GC_libpthread_end = NULL;
#    endif
STATIC ptr_t GC_libld_start = NULL;
STATIC ptr_t GC_libld_end = NULL;
static GC_bool lib_bounds_set = FALSE;

GC_INNER void
GC_init_lib_bounds(void)
{
  IF_CANCEL(int cancel_state;)

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (LIKELY(lib_bounds_set))
    return;

  DISABLE_CANCEL(cancel_state);
  GC_init(); /*< if not called yet */

#    if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  LOCK(); /*< just to set `GC_lock_holder` */
#    endif
#    ifdef HAVE_LIBPTHREAD_SO
  if (!GC_text_mapping("libpthread-", &GC_libpthread_start,
                       &GC_libpthread_end)) {
    WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
    /*
     * This might still work with some versions of `libpthread`,
     * so we do not `abort`.
     */
  }
#    endif
  if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
    WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
  }
#    if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  UNLOCK();
#    endif
  RESTORE_CANCEL(cancel_state);
  lib_bounds_set = TRUE;
}
#  endif /* REDIR_MALLOC_AND_LINUX_THREADS */

void *
calloc(size_t n, size_t lb)
{
  if (UNLIKELY((lb | n) > GC_SQRT_SIZE_MAX) /*< fast initial test */
      && lb && n > GC_SIZE_MAX / lb)
    return (*GC_get_oom_fn())(GC_SIZE_MAX); /*< `n * lb` overflow */
#  ifdef REDIR_MALLOC_AND_LINUX_THREADS
  /*
   * The linker may allocate some memory that is only pointed to by
   * memory-mapped thread stacks.  Make sure it is not collectible.
   */
  {
    ptr_t caller = (ptr_t)__builtin_return_address(0);

    GC_init_lib_bounds();
    if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
#    ifdef HAVE_LIBPTHREAD_SO
        /*
         * Note: the two ranges are actually usually adjacent, so there
         * may be a way to speed this up.
         */
        || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
#    endif
    ) {
      return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
    }
  }
#  endif
  return (void *)REDIRECT_MALLOC_F(n * lb);
}

#  ifndef strdup
char *
strdup(const char *s)
{
  size_t lb = strlen(s) + 1;
  char *result = (char *)REDIRECT_MALLOC_F(lb);

  if (UNLIKELY(NULL == result)) {
#    ifndef MSWINCE
    errno = ENOMEM;
#    endif
    return NULL;
  }
  BCOPY(s, result, lb);
  return result;
}
#  else
/*
 * If `strdup` is macro defined, we assume that it actually calls `malloc`,
 * and thus the right thing will happen even without overriding it.
 * This seems to be true on most Linux systems.
 */
#  endif /* strdup */

#  ifndef strndup
/* This is similar to `strdup()`. */
char *
strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str);
  if (UNLIKELY(len > size))
    len = size;
  copy = (char *)REDIRECT_MALLOC_F(len + 1);
  if (UNLIKELY(NULL == copy)) {
#    ifndef MSWINCE
    errno = ENOMEM;
#    endif
    return NULL;
  }
  if (LIKELY(len > 0))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
#  endif /* !strndup */

#  undef GC_debug_malloc_replacement
#  undef GC_debug_malloc_uncollectable_replacement

#  ifdef REDIRECT_MALLOC_DEBUG
#    define REDIRECT_FREE_F GC_debug_free
#    define REDIRECT_FREEZERO_F GC_debug_freezero
#  else
#    define REDIRECT_FREE_F GC_free
#    define REDIRECT_FREEZERO_F GC_freezero
#  endif

void
free(void *p)
{
#  ifdef IGNORE_FREE
  UNUSED_ARG(p);
#  else
#    if defined(REDIR_MALLOC_AND_LINUX_THREADS) \
        && !defined(USE_PROC_FOR_LIBRARIES)
  /*
   * Do not bother with initialization checks.  If nothing has been
   * initialized, then the check fails, and that is safe, since we have
   * not allocated uncollectible objects either.
   */
  ptr_t caller = (ptr_t)__builtin_return_address(0);

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
#      ifdef HAVE_LIBPTHREAD_SO
      || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
#      endif
  ) {
    GC_free(p);
    return;
  }
#    endif
  REDIRECT_FREE_F(p);
#  endif
}

void
freezero(void *p, size_t clear_lb)
{
  /* We do not expect the caller to be in `libdl` or `libpthread`. */
#  ifdef IGNORE_FREE
  if (UNLIKELY(NULL == p) || UNLIKELY(0 == clear_lb))
    return;

  LOCK();
  {
    size_t lb;
#    ifdef REDIRECT_MALLOC_DEBUG
    ptr_t base = (ptr_t)GC_base(p);

    GC_ASSERT(base != NULL);
    lb = HDR(p)->hb_sz - (size_t)((ptr_t)p - base); /*< `sizeof(oh)` */
#    else
    GC_ASSERT(GC_base(p) == p);
    lb = HDR(p)->hb_sz;
#    endif
    if (LIKELY(clear_lb > lb))
      clear_lb = lb;
  }
  /* Skip deallocation but clear the object. */
  UNLOCK();
  BZERO(p, clear_lb);
#  else
  REDIRECT_FREEZERO_F(p, clear_lb);
#  endif
}

void
freezeroall(void *p)
{
  freezero(p, GC_SIZE_MAX);
}

#endif /* REDIRECT_MALLOC && !REDIRECT_MALLOC_IN_HEADER */

GC_INNER void
GC_free_internal(void *base, const hdr *hhdr, size_t clear_ofs,
                 size_t clear_lb)
{
  size_t lb = hhdr->hb_sz;           /*< size in bytes */
  size_t lg = BYTES_TO_GRANULES(lb); /*< size in granules */
  int kind = hhdr->hb_obj_kind;

  GC_ASSERT(I_HOLD_LOCK());
#ifdef LOG_ALLOCS
  GC_log_printf("Free %p after GC #%lu\n", base, (unsigned long)GC_gc_no);
#endif
  GC_bytes_freed += lb;
  if (IS_UNCOLLECTABLE(kind))
    GC_non_gc_bytes -= lb;

  /*
   * Ensure the part of object to clear does not overrun the object.
   * Note: `SIZET_SAT_ADD(clear_ofs, clear_lb) > lb` cannot be used instead as
   * otherwise "memset specified bound exceeds maximum object size" warning
   * (a false positive) is reported by gcc-13.
   */
  if (UNLIKELY(clear_ofs >= GC_SIZE_MAX - clear_lb)
      || UNLIKELY(clear_ofs + clear_lb > lb))
    clear_lb = lb > clear_ofs ? lb - clear_ofs : 0;

  if (LIKELY(lg <= MAXOBJGRANULES)) {
    struct obj_kind *ok = &GC_obj_kinds[kind];
    void **flh;

    if (ok->ok_init && LIKELY(lb > sizeof(ptr_t))) {
      clear_ofs = sizeof(ptr_t);
      clear_lb = lb - sizeof(ptr_t);
    }
    if (clear_lb > 0)
      BZERO((ptr_t)base + clear_ofs, clear_lb);

    /*
     * It is unnecessary to clear the mark bit.  If the object is reallocated,
     * it does not matter.  Otherwise, the collector will do it, since it is
     * on a free list.
     */

    flh = &ok->ok_freelist[lg];
    obj_link(base) = *flh;
    *flh = (ptr_t)base;
  } else {
    if (clear_lb > 0)
      BZERO((ptr_t)base + clear_ofs, clear_lb);
    if (lb > HBLKSIZE) {
      GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb);
    }
    GC_ASSERT(ADDR(HBLKPTR(base)) == ADDR(hhdr->hb_block));
    GC_freehblk(hhdr->hb_block);
  }
  FREE_PROFILER_HOOK(base);
}

GC_API void GC_CALL
GC_free(void *p)
{
  const hdr *hhdr;

  if (p /* `!= NULL` */) {
    /* CPPCHECK */
  } else {
    /* Required by ANSI.  It is not my fault... */
    return;
  }

  LOCK();
  hhdr = HDR(p);
#if defined(REDIRECT_MALLOC)                                           \
    && ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
        || defined(REDIR_MALLOC_AND_LINUX_THREADS)                     \
        || (defined(SOLARIS) && defined(THREADS)) || defined(MSWIN32))
  /*
   * This might be called indirectly by `GC_print_callers` to free the
   * result of `backtrace_symbols()`.  For Solaris, we have to redirect
   * `malloc` calls during initialization.  For the others, this seems
   * to happen implicitly.  Do not try to deallocate that memory.
   */
  if (UNLIKELY(NULL == hhdr)) {
    UNLOCK();
    return;
  }
#endif
  GC_ASSERT(GC_base(p) == p);
  GC_free_internal(p, hhdr, 0 /* `clear_ofs` */, 0 /* `clear_lb` */);
  UNLOCK();
}

GC_API void GC_CALL
GC_freezero(void *p, size_t clear_lb)
{
  if (UNLIKELY(NULL == p))
    return;

  LOCK();
  GC_ASSERT(GC_base(p) == p);
  GC_free_internal(p, HDR(p), 0 /* `clear_ofs` */, clear_lb);
  UNLOCK();
}
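
For reference, a caller-side sketch of the redirection path defined above
(hypothetical client code, not part of the file; it assumes the collector is
built with `REDIRECT_MALLOC` and without `REDIRECT_MALLOC_IN_HEADER`, so the
plain `malloc()`, `freezero()` and `freezeroall()` calls below resolve to the
replacements in this file):

#include <string.h> /* `memset()`, `size_t` */

/* Prototypes matching the replacement functions above; a platform that  */
/* already declares `freezero()`/`freezeroall()` (e.g. OpenBSD) would    */
/* provide them through its own headers instead.                         */
void *malloc(size_t lb);
void freezero(void *p, size_t clear_lb);
void freezeroall(void *p);

int main(void)
{
  char *key = (char *)malloc(128); /* served by `REDIRECT_MALLOC_F` */

  memset(key, 0xA5, 128); /* pretend this is key material */
  /* The object is cleared, then freed via `REDIRECT_FREEZERO_F`. */
  freezero(key, 128);

  key = (char *)malloc(128);
  /* Same, but the whole object is cleared regardless of its size. */
  freezeroall(key);
  return 0;
}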