bdwgc / bdwgc / build 2095 (push, travis-ci, by ivmai)

06 Apr 2026 03:36PM UTC, coverage: 80.453% (+0.2%) from 80.276%

Workaround 'GC_SysVGetDataStart is never used' cppcheck FP

Issue #880 (bdwgc).

* include/private/gcconfig.h [CPPCHECK] (DATASTART_USES_XGETDATASTART):
Undefine (before first definition).
* include/private/gcconfig.h [SPARC && (SOLARIS || DRSNX || LINUX
|| SOLARIS) || S370 && UTS4 || X86_64 && SOLARIS]
(DATASTART_USES_XGETDATASTART): Define (near `DATASTART`).
* include/private/gcconfig.h [SOLARIS || DRSNX || UTS4 || LINUX
&& SPARC] (DATASTART_USES_XGETDATASTART): Do not define near `SVR4`.
* include/private/gcconfig.h [!DYNAMIC_LOADING
&& GC_DONT_REGISTER_MAIN_STATIC_DATA || OPENBSD && CPPCHECK]
(DATASTART_USES_XGETDATASTART): Undefine.

7207 of 8958 relevant lines covered (80.45%)

18837854.17 hits per line

Source File: /malloc.c (89.49% covered)

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2025 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <string.h>

/* Allocate reclaim list for the kind.  Returns `TRUE` on success. */
STATIC GC_bool
GC_alloc_reclaim_list(struct obj_kind *ok)
{
  struct hblk **result;

  GC_ASSERT(I_HOLD_LOCK());
  result = (struct hblk **)GC_scratch_alloc((MAXOBJGRANULES + 1)
                                            * sizeof(struct hblk *));
  if (UNLIKELY(NULL == result))
    return FALSE;

  BZERO(result, (MAXOBJGRANULES + 1) * sizeof(struct hblk *));
  ok->ok_reclaim_list = result;
  return TRUE;
}

/*
 * Allocate a large block of size `lb_adjusted` bytes with the requested
 * alignment (`align_m1 + 1`).  The block is not cleared.  We assume that
 * the size is nonzero and a multiple of `GC_GRANULE_BYTES`, and that
 * it already includes the `EXTRA_BYTES` value.  The `flags` argument should
 * be `IGNORE_OFF_PAGE` or 0.  Calls `GC_allochblk()` to do the actual
 * allocation, but also triggers collection and/or heap expansion
 * as appropriate.  Updates the value of `GC_bytes_allocd` and does other
 * accounting as well.
 */
STATIC ptr_t
GC_alloc_large(size_t lb_adjusted, int kind, unsigned flags, size_t align_m1)
{
  /*
   * TODO: It is unclear what retry limit is sufficient (a value of 3
   * leads to failures in some 32-bit applications; 10 is a somewhat
   * arbitrary value).
   */
#define MAX_ALLOCLARGE_RETRIES 10

  int retry_cnt;
  size_t n_blocks; /*< includes alignment */
  struct hblk *h;
  ptr_t result;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(!GC_is_initialized)) {
    UNLOCK(); /*< just to unset `GC_lock_holder` */
    GC_init();
    LOCK();
  }
  GC_ASSERT(lb_adjusted != 0 && (lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(SIZET_SAT_ADD(lb_adjusted, align_m1));

  /* Do our share of marking work. */
  if (GC_incremental && !GC_dont_gc) {
    GC_collect_a_little_inner(n_blocks);
  }

  h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
#ifdef USE_MUNMAP
  if (NULL == h && GC_merge_unmapped()) {
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }
#endif
  for (retry_cnt = 0; NULL == h; retry_cnt++) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCLARGE_RETRIES)
      ABORT("Too many retries in GC_alloc_large");
    if (UNLIKELY(!GC_collect_or_expand(n_blocks, flags, retry_cnt > 0)))
      return NULL;
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }

  GC_bytes_allocd += lb_adjusted;
  if (lb_adjusted > HBLKSIZE) {
    GC_large_allocd_bytes += HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted);
    if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
      GC_max_large_allocd_bytes = GC_large_allocd_bytes;
  }
  /* FIXME: Do we need some way to reset `GC_max_large_allocd_bytes`? */
  result = h->hb_body;
  GC_ASSERT((ADDR(result) & align_m1) == 0);
  return result;
}
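
/*
 * Illustrative only (not part of malloc.c): a hypothetical helper showing
 * how a caller holding the allocator lock could prepare the arguments that
 * `GC_alloc_large()` expects: a granule-rounded size that already includes
 * `EXTRA_BYTES`, and the alignment passed as a power-of-two value minus one.
 */
#if 0
static ptr_t
example_alloc_large(size_t lb /* client request */, size_t alignment)
{
  /* Pad and round the request as `GC_alloc_large()` requires. */
  size_t lb_adjusted = ROUNDUP_GRANULE_SIZE(ADD_EXTRA_BYTES(lb));

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(alignment != 0 && (alignment & (alignment - 1)) == 0);
  return GC_alloc_large(lb_adjusted, NORMAL, 0 /* `flags` */, alignment - 1);
}
#endif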

/*
 * Allocate a large block of the given size in bytes and clear it if
 * appropriate.  We assume that the size is nonzero and a multiple of
 * `GC_GRANULE_BYTES`, and that it already includes the `EXTRA_BYTES` value.
 * Updates the value of `GC_bytes_allocd`.
 */
STATIC ptr_t
GC_alloc_large_and_clear(size_t lb_adjusted, int kind, unsigned flags)
{
  ptr_t result;

  GC_ASSERT(I_HOLD_LOCK());
  result = GC_alloc_large(lb_adjusted, kind, flags, 0 /* `align_m1` */);
  if (LIKELY(result != NULL)
      && (GC_debugging_started || GC_obj_kinds[kind].ok_init)) {
    /* Clear the whole block, in case of a `GC_realloc` call. */
    BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
  }
  return result;
}

/*
 * Fill in additional entries in `GC_size_map`, including the `i`-th one.
 * Note that a filled-in section of the array ending at `n` always has
 * a length of at least `n / 4`.
 */
STATIC void
GC_extend_size_map(size_t i)
{
  size_t original_lg = ALLOC_REQUEST_GRANS(i);
  size_t lg;
  /*
   * The size we try to preserve.  Close to `i`, unless this would
   * introduce too many distinct sizes.
   */
  size_t byte_sz = GRANULES_TO_BYTES(original_lg);
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  /* The lowest indexed entry we initialize. */
  size_t low_limit;
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    low_limit = byte_sz - (byte_sz >> 2); /*< much smaller than `i` */
    lg = original_lg;
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    lg = ALLOC_REQUEST_GRANS(low_limit);
    lg += lg >> 3;
    if (lg < original_lg)
      lg = original_lg;
  }

  /*
   * For these larger sizes, we use an even number of granules.
   * This makes it easier to, e.g., construct a 16-byte-aligned
   * allocator even if `GC_GRANULE_BYTES` is 8.
   */
  lg = (lg + 1) & ~(size_t)1;
  if (lg > MAXOBJGRANULES)
    lg = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  GC_ASSERT(lg != 0);
  number_of_objs = HBLK_GRANULES / lg;
  GC_ASSERT(number_of_objs != 0);
  lg = (HBLK_GRANULES / number_of_objs) & ~(size_t)1;

  /*
   * We may need one extra byte; do not always fill in
   * `GC_size_map[byte_sz]`.
   */
  byte_sz = GRANULES_TO_BYTES(lg) - EXTRA_BYTES;

  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = lg;
}
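
/*
 * Illustrative only (not part of malloc.c): `GC_size_map` maps a requested
 * byte count to an object size in granules, so that nearby request sizes
 * share one free list and a lookup is a single array read.  A stand-alone
 * sketch of the idea, with hypothetical names and an assumed 16-byte
 * granule (the real map is filled in runs by `GC_extend_size_map()` and
 * may round sizes further up to limit the number of distinct sizes):
 */
#if 0
#define EX_GRANULE_BYTES 16
static unsigned char ex_size_map[512]; /*< request bytes -> granules;
                                           0 means "not yet initialized" */

static size_t
ex_granules_for(size_t lb) /* requires lb < sizeof(ex_size_map) */
{
  size_t lg = ex_size_map[lb]; /*< fast path: one array read */

  if (0 == lg) {
    lg = (lb + EX_GRANULE_BYTES - 1) / EX_GRANULE_BYTES;
    if (0 == lg)
      lg = 1; /*< treat a zero-byte request as the smallest object */
    ex_size_map[lb] = (unsigned char)lg;
  }
  return lg;
}
#endif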

STATIC void *
GC_generic_malloc_inner_small(size_t lb, int kind)
{
  struct obj_kind *ok = &GC_obj_kinds[kind];
  size_t lg = GC_size_map[lb];
  void **opp = &ok->ok_freelist[lg];
  void *op = *opp;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == op)) {
    if (0 == lg) {
      if (UNLIKELY(!GC_is_initialized)) {
        UNLOCK(); /*< just to unset `GC_lock_holder` */
        GC_init();
        LOCK();
        lg = GC_size_map[lb];
      }
      if (0 == lg) {
        GC_extend_size_map(lb);
        lg = GC_size_map[lb];
        GC_ASSERT(lg != 0);
      }
      /* Retry. */
      opp = &ok->ok_freelist[lg];
      op = *opp;
    }
    if (NULL == op) {
      if (NULL == ok->ok_reclaim_list && !GC_alloc_reclaim_list(ok))
        return NULL;
      op = GC_allocobj(lg, kind);
      if (NULL == op)
        return NULL;
    }
  }
  *opp = obj_link(op);
  obj_link(op) = NULL;
  GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  return op;
}
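
/*
 * Illustrative only (not part of malloc.c): the free lists used above are
 * intrusive singly-linked lists threaded through the free objects
 * themselves; `obj_link(op)` is essentially `*(void **)op`.  A stand-alone
 * sketch of the pop operation performed on the fast path:
 */
#if 0
static void *
ex_freelist_pop(void **head)
{
  void *op = *head;

  if (op != NULL) {
    *head = *(void **)op; /*< advance the head to the next free object */
    *(void **)op = NULL;  /*< do not leave a stale link inside the object */
  }
  return op;
}
#endif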

GC_INNER void *
GC_generic_malloc_inner(size_t lb, int kind, unsigned flags)
{
  size_t lb_adjusted;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb)) {
    return GC_generic_malloc_inner_small(lb, kind);
  }

#if MAX_EXTRA_BYTES > 0
  if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
    /* No need to add `EXTRA_BYTES`. */
    lb_adjusted = lb;
  } else
#endif
  /* else */ {
    lb_adjusted = ADD_EXTRA_BYTES(lb);
  }
  return GC_alloc_large_and_clear(ROUNDUP_GRANULE_SIZE(lb_adjusted), kind,
                                  flags);
}

#ifdef GC_COLLECT_AT_MALLOC
size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
#endif

GC_INNER void *
GC_generic_malloc_aligned(size_t lb, int kind, unsigned flags, size_t align_m1)
{
  void *result;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (UNLIKELY(get_have_errors()))
    GC_print_all_errors();
  GC_notify_or_invoke_finalizers();
  GC_DBG_COLLECT_AT_MALLOC(lb);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < GC_GRANULE_BYTES)) {
    LOCK();
    result = GC_generic_malloc_inner_small(lb, kind);
    UNLOCK();
  } else {
    size_t lg;
    size_t lb_adjusted;
    GC_bool init;

#if MAX_EXTRA_BYTES > 0
    if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
      /* No need to add `EXTRA_BYTES`. */
      lb_adjusted = ROUNDUP_GRANULE_SIZE(lb);
#  ifdef THREADS
      lg = BYTES_TO_GRANULES(lb_adjusted);
#  endif
    } else
#endif
    /* else */ {
      if (UNLIKELY(0 == lb))
        lb = 1;
      lg = ALLOC_REQUEST_GRANS(lb);
      lb_adjusted = GRANULES_TO_BYTES(lg);
    }

    init = GC_obj_kinds[kind].ok_init;
    if (LIKELY(align_m1 < GC_GRANULE_BYTES)) {
      align_m1 = 0;
    } else if (align_m1 < HBLKSIZE) {
      align_m1 = HBLKSIZE - 1;
    }
    LOCK();
    result = GC_alloc_large(lb_adjusted, kind, flags, align_m1);
    if (LIKELY(result != NULL)) {
      if (GC_debugging_started
#ifndef THREADS
          || init
#endif
      ) {
        BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
      } else {
#ifdef THREADS
        GC_ASSERT(GRANULES_TO_PTRS(lg) >= 2);
        /*
         * Clear any memory that might be used for the GC descriptors
         * before we release the allocator lock.
         */
        ((ptr_t *)result)[0] = NULL;
        ((ptr_t *)result)[1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 2] = NULL;
#endif
      }
    }
    UNLOCK();
#ifdef THREADS
    if (init && !GC_debugging_started && result != NULL) {
      /* Clear the rest (i.e. excluding the initial 2 words). */
      BZERO((ptr_t *)result + 2,
            HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted) - 2 * sizeof(ptr_t));
    }
#endif
  }
  if (UNLIKELY(NULL == result)) {
    result = (*GC_get_oom_fn())(lb);
    /* Note: result might be misaligned. */
  }
  return result;
}
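
/*
 * Illustrative only (not part of malloc.c): client code does not call
 * `GC_generic_malloc_aligned()` directly.  Aligned objects are normally
 * requested through the public `GC_memalign()` API declared in `gc.h`,
 * which is expected to reach this code path for alignments of a granule
 * or more.
 */
#if 0
#include "gc.h"

void *
ex_aligned_buffer(void)
{
  /* A 64-byte-aligned, collectible 1 KB buffer. */
  return GC_memalign(64, 1024);
}
#endif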

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc(size_t lb, int kind)
{
  return GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
                                   0 /* `align_m1` */);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind_global(size_t lb, int kind)
{
  return GC_malloc_kind_aligned_global(lb, kind, 0 /* `align_m1` */);
}

GC_INNER void *
GC_malloc_kind_aligned_global(size_t lb, int kind, size_t align_m1)
{
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < HBLKSIZE / 2)) {
    void *op;
    void **opp;
    size_t lg;

    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (UNLIKELY(align_m1 >= GC_GRANULE_BYTES)) {
      /* TODO: Avoid linear search. */
      for (; (ADDR(op) & align_m1) != 0; op = *opp) {
        opp = &obj_link(op);
      }
    }
    if (LIKELY(op != NULL)) {
      GC_ASSERT(PTRFREE == kind || NULL == obj_link(op)
                || (ADDR(obj_link(op)) < GC_greatest_real_heap_addr
                    && GC_least_real_heap_addr < ADDR(obj_link(op))));
      *opp = obj_link(op);
      if (kind != PTRFREE)
        obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      UNLOCK();
      GC_ASSERT((ADDR(op) & align_m1) == 0);
      return op;
    }
    UNLOCK();
  }

  /*
   * We make the `GC_clear_stack()` call a tail one, hoping to get more
   * of the stack.
   */
  return GC_clear_stack(
      GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */, align_m1));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  return GC_malloc_kind_global(lb, kind);
}
#endif

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic(size_t lb)
{
  /* Allocate `lb` bytes of atomic (pointer-free) data. */
  return GC_malloc_kind(lb, PTRFREE);
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc(size_t lb)
{
  /* Allocate `lb` bytes of composite (pointer-containing) data. */
  return GC_malloc_kind(lb, NORMAL);
}
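
/*
 * Illustrative only (not part of malloc.c): typical client usage of the
 * two public entry points above.  `GC_malloc_atomic()` tells the collector
 * not to scan the object for pointers, so it must be used only for data
 * that is guaranteed pointer-free.
 */
#if 0
#include "gc.h"
#include <string.h>

struct ex_node {
  struct ex_node *next; /*< scanned: allocated with `GC_malloc()` */
  char *payload;        /*< points to a pointer-free buffer */
};

static struct ex_node *
ex_make_node(const char *s)
{
  struct ex_node *n = (struct ex_node *)GC_malloc(sizeof(struct ex_node));

  if (n != NULL) {
    n->payload = (char *)GC_malloc_atomic(strlen(s) + 1);
    if (n->payload != NULL)
      strcpy(n->payload, s);
  }
  return n;
}
#endif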

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc_uncollectable(size_t lb, int kind)
{
  void *op;
  size_t lb_orig = lb;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (EXTRA_BYTES != 0 && LIKELY(lb != 0)) {
    /*
     * We do not need the extra byte, since this object will not be
     * collected anyway.
     */
    lb--;
  }

  if (SMALL_OBJ(lb)) {
    void **opp;
    size_t lg;

    if (UNLIKELY(get_have_errors()))
      GC_print_all_errors();
    GC_notify_or_invoke_finalizers();
    GC_DBG_COLLECT_AT_MALLOC(lb_orig);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (LIKELY(op != NULL)) {
      *opp = obj_link(op);
      obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      /*
       * The mark bit was already set on the free list.  It will be cleared
       * only temporarily during a collection, as a result of the normal
       * free-list mark bit clearing.
       */
      GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
    } else {
      op = GC_generic_malloc_inner_small(lb, kind);
      if (NULL == op) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb_orig);
      }
      /* For small objects, the free lists are completely marked. */
    }
    GC_ASSERT(GC_is_marked(op));
    UNLOCK();
  } else {
    op = GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
                                   0 /* `align_m1` */);
    if (op != NULL) {
      hdr *hhdr;

      GC_ASSERT(HBLKDISPL(op) == 0); /*< large block */
      LOCK();
      hhdr = HDR(op);
      set_mark_bit_from_hdr(hhdr, 0); /*< the only object */
#ifndef THREADS
      /*
       * This is not guaranteed in the multi-threaded case because the
       * counter could be updated before locking.
       */
      GC_ASSERT(0 == hhdr->hb_n_marks);
#endif
      hhdr->hb_n_marks = 1;
      UNLOCK();
    }
  }
  return op;
}

GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_uncollectable(size_t lb)
{
  /*
   * Allocate `lb` bytes of pointer-containing, traced, but not collectible
   * data.
   */
  return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
}

#ifdef GC_ATOMIC_UNCOLLECTABLE
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic_uncollectable(size_t lb)
{
  return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
}
#endif /* GC_ATOMIC_UNCOLLECTABLE */
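
/*
 * Illustrative only (not part of malloc.c): uncollectible objects are
 * traced (their pointer fields keep other objects alive) but are never
 * reclaimed automatically, so the client must release them explicitly
 * with `GC_free()`.
 */
#if 0
#include "gc.h"

void
ex_uncollectable_roundtrip(void)
{
  void *p = GC_malloc_uncollectable(128);

  /* ... use `p` as a root-like object ... */
  GC_free(p); /*< required: the collector will not reclaim it */
}
#endif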

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

#  ifndef MSWINCE
#    include <errno.h>
#  endif

#  ifdef REDIRECT_MALLOC_DEBUG
#    include "private/dbg_mlc.h"
#    ifndef REDIRECT_MALLOC_UNCOLLECTABLE
#      define REDIRECT_MALLOC_F(lb) \
        GC_debug_malloc_inner(lb, TRUE /* `is_redirect` */, GC_DBG_EXTRAS)
#    else
#      define REDIRECT_MALLOC_F(lb) \
        GC_debug_malloc_uncollectable_inner(lb, TRUE, GC_DBG_EXTRAS)
#    endif
#  elif defined(REDIRECT_MALLOC_UNCOLLECTABLE)
#    define REDIRECT_MALLOC_F GC_malloc_uncollectable
#  else
#    define REDIRECT_MALLOC_F GC_malloc
#  endif

void *
malloc(size_t lb)
{
  /*
   * It might help to manually inline the `GC_malloc` call here.
   * But any decent compiler should reduce the extra procedure call
   * to at most a jump instruction in this case.
   */
  return REDIRECT_MALLOC_F(lb);
}

#  if defined(REDIR_MALLOC_AND_LINUX_THREADS)                    \
      && (defined(IGNORE_FREE) || defined(REDIRECT_MALLOC_DEBUG) \
          || !defined(REDIRECT_MALLOC_UNCOLLECTABLE))
#    ifdef HAVE_LIBPTHREAD_SO
STATIC ptr_t GC_libpthread_start = NULL;
STATIC ptr_t GC_libpthread_end = NULL;
#    endif
STATIC ptr_t GC_libld_start = NULL;
STATIC ptr_t GC_libld_end = NULL;
static GC_bool lib_bounds_set = FALSE;

GC_INNER void
GC_init_lib_bounds(void)
{
  IF_CANCEL(int cancel_state;)

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (LIKELY(lib_bounds_set))
    return;

  DISABLE_CANCEL(cancel_state);
  GC_init(); /*< if not called yet */

#    if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  LOCK(); /*< just to set `GC_lock_holder` */
#    endif
#    ifdef HAVE_LIBPTHREAD_SO
  if (!GC_text_mapping("libpthread-", &GC_libpthread_start,
                       &GC_libpthread_end)) {
    WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
    /*
     * This might still work with some versions of `libpthread`,
     * so we do not `abort`.
     */
  }
#    endif
  if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
    WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
  }
#    if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  UNLOCK();
#    endif
  RESTORE_CANCEL(cancel_state);
  lib_bounds_set = TRUE;
}
#  endif

void *
calloc(size_t n, size_t lb)
{
  if (UNLIKELY((lb | n) > GC_SQRT_SIZE_MAX) /*< fast initial test */
      && lb && n > GC_SIZE_MAX / lb)
    return (*GC_get_oom_fn())(GC_SIZE_MAX); /*< `n * lb` overflow */
#  ifdef REDIR_MALLOC_AND_LINUX_THREADS
#    if defined(REDIRECT_MALLOC_DEBUG) \
        || !defined(REDIRECT_MALLOC_UNCOLLECTABLE)
  /*
   * The linker may allocate some memory that is only pointed to by
   * memory-mapped thread stacks.  Make sure it is not collectible.
   */
  {
    ptr_t caller = (ptr_t)__builtin_return_address(0);

    GC_init_lib_bounds();
    if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
#      ifdef HAVE_LIBPTHREAD_SO
        /*
         * Note: the two ranges are actually usually adjacent, so there
         * may be a way to speed this up.
         */
        || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
#      endif
    ) {
      return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
    }
  }
#    elif defined(IGNORE_FREE)
  /* Just to ensure `static` variables used by `free()` are initialized. */
  GC_init_lib_bounds();
#    endif
#  endif
  return REDIRECT_MALLOC_F(n * lb);
}
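
/*
 * Illustrative only (not part of malloc.c): the `(lb | n) > GC_SQRT_SIZE_MAX`
 * test above is a cheap overflow filter.  If both factors fit in half the
 * bits of `size_t`, their product cannot exceed the maximum `size_t` value,
 * so the (slower) division-based check is needed only in the rare remaining
 * case.  A stand-alone sketch with hypothetical names:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define EX_SQRT_SIZE_MAX ((((size_t)1) << (sizeof(size_t) * 4)) - 1)

static int
ex_mul_overflows(size_t n, size_t lb)
{
  if ((n | lb) <= EX_SQRT_SIZE_MAX)
    return 0; /*< both factors are "small", so the product fits */
  return lb != 0 && n > SIZE_MAX / lb; /*< exact check by division */
}
#endif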

#  ifndef strdup
char *
strdup(const char *s)
{
  size_t lb = strlen(s) + 1;
  char *result = (char *)REDIRECT_MALLOC_F(lb);

  if (UNLIKELY(NULL == result)) {
#    ifndef MSWINCE
    errno = ENOMEM;
#    endif
    return NULL;
  }
  BCOPY(s, result, lb);
  return result;
}
#  else
/*
 * If `strdup` is macro-defined, we assume that it actually calls `malloc`,
 * and thus the right thing will happen even without overriding it.
 * This seems to be true on most Linux systems.
 */
#  endif /* strdup */

#  ifndef strndup
/* This is similar to `strdup()`. */
char *
strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str);

  if (UNLIKELY(len > size))
    len = size;
  copy = (char *)REDIRECT_MALLOC_F(len + 1);
  if (UNLIKELY(NULL == copy)) {
#    ifndef MSWINCE
    errno = ENOMEM;
#    endif
    return NULL;
  }
  if (LIKELY(len > 0))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
#  endif /* !strndup */

#  ifdef REDIRECT_MALLOC_DEBUG
#    define REDIRECT_FREE_F GC_debug_free
#    define REDIRECT_FREEZERO_F GC_debug_freezero
#  else
#    define REDIRECT_FREE_F GC_free
#    define REDIRECT_FREEZERO_F GC_freezero
#  endif

void
free(void *p)
{
#  if defined(REDIR_MALLOC_AND_LINUX_THREADS) \
      && !defined(USE_PROC_FOR_LIBRARIES)     \
      && (defined(REDIRECT_MALLOC_DEBUG) || defined(IGNORE_FREE))
  /*
   * Do not bother with initialization checks.  If nothing has been
   * initialized, then the check fails, and that is safe, since we have
   * not allocated any uncollectible objects either.
   */
  ptr_t caller = (ptr_t)__builtin_return_address(0);

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
#    ifdef HAVE_LIBPTHREAD_SO
      || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
#    endif
  ) {
    GC_free(p);
    return;
  }
#  endif
#  ifdef IGNORE_FREE
  UNUSED_ARG(p);
#  else
  REDIRECT_FREE_F(p);
#  endif
}

EXTERN_C_BEGIN
extern void freezero(void *p, size_t clear_lb);
extern void freezeroall(void *p);
EXTERN_C_END

void
freezero(void *p, size_t clear_lb)
{
  /* We do not expect the caller to be in `libdl` or `libpthread`. */
#  ifdef IGNORE_FREE
  if (UNLIKELY(NULL == p) || UNLIKELY(0 == clear_lb))
    return;

  LOCK();
  {
    size_t lb;
#    ifdef REDIRECT_MALLOC_DEBUG
    ptr_t base = (ptr_t)GC_base(p);

    GC_ASSERT(base != NULL);
    lb = HDR(p)->hb_sz - (size_t)((ptr_t)p - base); /*< `sizeof(oh)` */
#    else
    GC_ASSERT(GC_base(p) == p);
    lb = HDR(p)->hb_sz;
#    endif
    if (LIKELY(clear_lb > lb))
      clear_lb = lb;
  }
  /* Skip the deallocation but clear the object. */
  UNLOCK();
  BZERO(p, clear_lb);
#  else
  REDIRECT_FREEZERO_F(p, clear_lb);
#  endif
}

void
freezeroall(void *p)
{
  freezero(p, GC_SIZE_MAX);
}

#endif /* REDIRECT_MALLOC && !REDIRECT_MALLOC_IN_HEADER */

GC_INNER void
GC_free_internal(void *base, const hdr *hhdr, size_t clear_ofs,
                 size_t clear_lb)
{
  size_t lb = hhdr->hb_sz;           /*< size in bytes */
  size_t lg = BYTES_TO_GRANULES(lb); /*< size in granules */
  int kind = hhdr->hb_obj_kind;

  GC_ASSERT(I_HOLD_LOCK());
#ifdef LOG_ALLOCS
  GC_log_printf("Free %p after GC #%lu\n", base, (unsigned long)GC_gc_no);
#endif
  GC_bytes_freed += lb;
  if (IS_UNCOLLECTABLE(kind))
    GC_non_gc_bytes -= lb;

  /*
   * Ensure the part of the object to clear does not overrun the object.
   * Note: `SIZET_SAT_ADD(clear_ofs, clear_lb) > lb` cannot be used instead,
   * as otherwise a "memset specified bound exceeds maximum object size"
   * warning (a false positive) is reported by gcc-13.
   */
  if (UNLIKELY(clear_ofs >= GC_SIZE_MAX - clear_lb)
      || UNLIKELY(clear_ofs + clear_lb > lb))
    clear_lb = lb > clear_ofs ? lb - clear_ofs : 0;

  if (LIKELY(lg <= MAXOBJGRANULES)) {
    struct obj_kind *ok = &GC_obj_kinds[kind];
    void **flh;

    if (ok->ok_init && LIKELY(lb > sizeof(ptr_t))) {
      clear_ofs = sizeof(ptr_t);
      clear_lb = lb - sizeof(ptr_t);
    }
    if (clear_lb > 0)
      BZERO((ptr_t)base + clear_ofs, clear_lb);

    /*
     * It is unnecessary to clear the mark bit.  If the object is reallocated,
     * it does not matter.  Otherwise, the collector will do it, since it is
     * on a free list.
     */

    flh = &ok->ok_freelist[lg];
    obj_link(base) = *flh;
    *flh = (ptr_t)base;
  } else {
    if (clear_lb > 0)
      BZERO((ptr_t)base + clear_ofs, clear_lb);
    if (lb > HBLKSIZE) {
      GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb);
    }
    GC_ASSERT(ADDR(HBLKPTR(base)) == ADDR(hhdr->hb_block));
    GC_freehblk(hhdr->hb_block);
  }
  FREE_PROFILER_HOOK(base);
}

GC_API void GC_CALL
GC_free(void *p)
{
  const hdr *hhdr;

  if (UNLIKELY(NULL == p)) {
    /* Required by ANSI. */
    return;
  }

  LOCK();
  hhdr = HDR(p);
#if defined(REDIRECT_MALLOC)                                           \
    && ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
        || defined(REDIR_MALLOC_AND_LINUX_THREADS) || defined(MSWIN32))
  /*
   * This might be called indirectly by `GC_print_callers` to free the
   * result of `backtrace_symbols()`.  For the other cases, this seems
   * to happen implicitly.  Do not try to deallocate that memory.
   */
  if (UNLIKELY(NULL == hhdr)) {
    UNLOCK();
    return;
  }
#endif
  GC_ASSERT(GC_base(p) == p);
  GC_free_internal(p, hhdr, 0 /* `clear_ofs` */, 0 /* `clear_lb` */);
  UNLOCK();
}
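
/*
 * Illustrative only (not part of malloc.c): for collectible objects,
 * calling `GC_free()` is optional; it merely lets the space be reused
 * sooner.  As the assertion above enforces, it must be given the base
 * address of an object (or `NULL`), never an interior pointer.
 */
#if 0
#include "gc.h"

void
ex_free_base_only(void)
{
  char *p = (char *)GC_malloc(64);

  /* `GC_free(p + 8)` would be erroneous: an interior pointer. */
  GC_free(p);    /*< OK: `p` is the object base */
  GC_free(NULL); /*< OK: ignored, as required by ANSI */
}
#endif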

GC_API void GC_CALL
GC_freezero(void *p, size_t clear_lb)
{
  if (UNLIKELY(NULL == p))
    return;

  LOCK();
  GC_ASSERT(GC_base(p) == p);
  GC_free_internal(p, HDR(p), 0 /* `clear_ofs` */, clear_lb);
  UNLOCK();
}