
ivmai / bdwgc, build 1465 (push, via travis-ci-com, by ivmai)

20 Mar 2023 06:58AM UTC coverage: 76.199% (-0.2%) from 76.448%
Fix description of client promise for IGNORE_OFF_PAGE allocated objects (documentation)

The client should keep a pointer within the first heap block for such
an allocated object.  Previously, the documentation and comments said
that such a pointer should be within the first 256 or 512 bytes.

* README.md (The C Interface to the Allocator): Fix the description
(comment) of GC_malloc_ignore_off_page (the client promises to keep
a pointer within the first hblk of the object instead of 256 or 512
bytes, or a page).
* docs/gcinterface.md (GC_MALLOC_IGNORE_OFF_PAGE): Likewise.
* gc.man (GC_malloc_atomic_ignore_off_page): Likewise.
* include/gc/gc.h (GC_malloc_ignore_off_page): Likewise.
* include/gc/gc_mark.h (GC_generic_malloc_ignore_off_page): Likewise.
* include/private/gc_priv.h (IGNORE_OFF_PAGE): Likewise.
* include/private/gc_priv.h [DBG_HDRS_ALL || GC_GCJ_SUPPORT
|| !GC_NO_FINALIZATION] (GC_generic_malloc_inner_ignore_off_page):
Likewise.
* malloc.c [DBG_HDRS_ALL || GC_GCJ_SUPPORT || !GC_NO_FINALIZATION]
(GC_generic_malloc_inner_ignore_off_page): Likewise.
* include/gc/gc_gcj.h (GC_gcj_malloc_ignore_off_page): Refine comment.
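
A minimal sketch of how a client might honor the corrected promise is
given below.  It assumes only the public gc.h API; the struct and the
buffer size are illustrative and are not taken from the changed files.

    #include "gc.h"

    /* A large buffer reached only through its base pointer.  Because it  */
    /* is allocated with GC_MALLOC_IGNORE_OFF_PAGE, the collector is only */
    /* obliged to honor pointers into the first heap block (hblk) of the  */
    /* object, so the client keeps `base' pointing at the start.          */
    struct big_buffer {
      char *base;       /* must stay within the first hblk of the object */
      size_t length;
    };

    static struct big_buffer alloc_big_buffer(size_t length)
    {
      struct big_buffer buf;

      buf.base = (char *)GC_MALLOC_IGNORE_OFF_PAGE(length);
      buf.length = length;
      return buf;
    }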

7751 of 10172 relevant lines covered (76.2%)

8682664.47 hits per line

Source File: /malloc.c (90.95% covered)
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <string.h>

/* Allocate reclaim list for the kind.  Returns TRUE on success.        */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result;

    GC_ASSERT(I_HOLD_LOCK());
    result = (struct hblk **)GC_scratch_alloc(
                                (MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (EXPECT(NULL == result, FALSE)) return FALSE;

    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return TRUE;
}

GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags,
                              size_t align_m1)
{
    struct hblk * h;
    size_t n_blocks; /* includes alignment */
    ptr_t result = NULL;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    lb = ROUNDUP_GRANULE_SIZE(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(SIZET_SAT_ADD(lb, align_m1));
    if (!EXPECT(GC_is_initialized, TRUE)) {
      UNLOCK(); /* just to unset GC_lock_holder */
      GC_init();
      LOCK();
    }
    /* Do our share of marking work.    */
    if (GC_incremental && !GC_dont_gc) {
            ENTER_GC();
            GC_collect_a_little_inner((int)n_blocks);
            EXIT_GC();
    }

    h = GC_allochblk(lb, k, flags, align_m1);
#   ifdef USE_MUNMAP
        if (NULL == h) {
            GC_merge_unmapped();
            h = GC_allochblk(lb, k, flags, align_m1);
        }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
        h = GC_allochblk(lb, k, flags, align_m1);
        retry = TRUE;
    }
    if (EXPECT(h != NULL, TRUE)) {
        if (lb > HBLKSIZE) {
            GC_large_allocd_bytes += HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb);
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
        result = h -> hb_body;
        GC_ASSERT(((word)result & align_m1) == 0);
    }
    return result;
}

/* Allocate a large block of size lb bytes.  Clear if appropriate.      */
/* EXTRA_BYTES were already added to lb.                                */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_alloc_large(lb, k, flags, 0 /* align_m1 */);
    if (EXPECT(result != NULL, TRUE)
          && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb));
    }
    return result;
}

/* Fill in additional entries in GC_size_map, including the i-th one.   */
/* Note that a filled in section of the array ending at n always        */
/* has the length of at least n/4.                                      */
STATIC void GC_extend_size_map(size_t i)
{
  size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
  size_t granule_sz;
  size_t byte_sz = GRANULES_TO_BYTES(orig_granule_sz);
                        /* The size we try to preserve.         */
                        /* Close to i, unless this would        */
                        /* introduce too many distinct sizes.   */
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  size_t low_limit; /* The lowest indexed entry we initialize.  */
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    low_limit = byte_sz - (byte_sz >> 2); /* much smaller than i */
    granule_sz = orig_granule_sz;
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    granule_sz = ROUNDED_UP_GRANULES(low_limit);
    granule_sz += granule_sz >> 3;
    if (granule_sz < orig_granule_sz)
      granule_sz = orig_granule_sz;
  }

  /* For these larger sizes, we use an even number of granules.         */
  /* This makes it easier to, e.g., construct a 16-byte-aligned         */
  /* allocator even if GRANULE_BYTES is 8.                              */
  granule_sz = (granule_sz + 1) & ~1;
  if (granule_sz > MAXOBJGRANULES)
    granule_sz = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  number_of_objs = HBLK_GRANULES / granule_sz;
  GC_ASSERT(number_of_objs != 0);
  granule_sz = (HBLK_GRANULES / number_of_objs) & ~1;

  byte_sz = GRANULES_TO_BYTES(granule_sz) - EXTRA_BYTES;
                        /* We may need one extra byte; do not always    */
                        /* fill in GC_size_map[byte_sz].                */

  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = granule_sz;
}

/* Allocate lb bytes for an object of kind k.           */
/* Should not be used to directly to allocate objects   */
/* that require special handling on allocation.         */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
          if (lg == 0) {
            if (!EXPECT(GC_is_initialized, TRUE)) {
              UNLOCK(); /* just to unset GC_lock_holder */
              GC_init();
              LOCK();
              lg = GC_size_map[lb];
            }
            if (0 == lg) {
              GC_extend_size_map(lb);
              lg = GC_size_map[lb];
              GC_ASSERT(lg != 0);
            }
            /* Retry */
            opp = &(kind -> ok_freelist[lg]);
            op = *opp;
          }
          if (0 == op) {
            if (0 == kind -> ok_reclaim_list &&
                !GC_alloc_reclaim_list(kind))
              return NULL;
            op = GC_allocobj(lg, k);
            if (0 == op)
              return NULL;
          }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
    } else {
        size_t lb_adjusted = ADD_SLOP(lb);

        op = (ptr_t)GC_alloc_large_and_clear(lb_adjusted, k, 0 /* flags */);
        if (op != NULL)
            GC_bytes_allocd += lb_adjusted;
    }

    return op;
}

#if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
    || !defined(GC_NO_FINALIZATION)
  /* Allocate a composite object of size n bytes.  The caller           */
  /* guarantees that pointers past the first hblk are not relevant.     */
  GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
  {
    size_t lb_adjusted;
    void * op;

    GC_ASSERT(I_HOLD_LOCK());
    if (lb <= HBLKSIZE)
        return GC_generic_malloc_inner(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    if (EXPECT(op != NULL, TRUE)) {
        GC_bytes_allocd += lb_adjusted;
    }
    return op;
  }
#endif

#ifdef GC_COLLECT_AT_MALLOC
  /* Parameter to force GC at every malloc of size greater or equal to  */
  /* the given value.  This might be handy during debugging.            */
# if defined(CPPCHECK)
    size_t GC_dbg_collect_at_malloc_min_lb = 16*1024; /* e.g. */
# else
    size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
# endif
#endif

GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, size_t align_m1)
{
    void * result;

    GC_ASSERT(k < MAXOBJKINDS);
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb) && EXPECT(align_m1 < GRANULE_BYTES, TRUE)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
        init = GC_obj_kinds[k].ok_init;
        if (EXPECT(align_m1 < GRANULE_BYTES, TRUE)) {
          align_m1 = 0;
        } else if (align_m1 < HBLKSIZE) {
          align_m1 = HBLKSIZE - 1;
        }
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0 /* flags */, align_m1);
        if (EXPECT(result != NULL, TRUE)) {
          if (GC_debugging_started) {
            BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_rounded));
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
          GC_bytes_allocd += lb_rounded;
        }
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_rounded));
        }
    }
    if (EXPECT(NULL == result, FALSE))
      result = (*GC_get_oom_fn())(lb); /* might be misaligned */
    return result;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    return GC_generic_malloc_aligned(lb, k, 0 /* align_m1 */);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void *op;
        void **opp;
        size_t lg;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            if (k == PTRFREE) {
                *opp = obj_link(op);
            } else {
                GC_ASSERT(0 == obj_link(op)
                          || ((word)obj_link(op)
                                <= (word)GC_greatest_plausible_heap_addr
                              && (word)obj_link(op)
                                >= (word)GC_least_plausible_heap_addr));
                *opp = obj_link(op);
                obj_link(op) = 0;
            }
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
            return op;
        }
        UNLOCK();
    }

    /* We make the GC_clear_stack() call a tail one, hoping to get more */
    /* of the stack.                                                    */
    return GC_clear_stack(GC_generic_malloc(lb, k));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t lb, int k)
  {
    return GC_malloc_kind_global(lb, k);
  }
#endif

/* Allocate lb bytes of atomic (pointer-free) data.     */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
{
    return GC_malloc_kind(lb, PTRFREE);
}

/* Allocate lb bytes of composite (pointerful) data.    */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
{
    return GC_malloc_kind(lb, NORMAL);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
                                                        size_t lb, int k)
{
    void *op;

    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void **opp;
        size_t lg;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            /* Mark bit was already set on free list.  It will be       */
            /* cleared only temporarily during a collection, as a       */
            /* result of the normal free list mark bit clearing.        */
            GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = GC_generic_malloc(lb, k);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
    } else {
      op = GC_generic_malloc(lb, k);
      if (op /* != NULL */) { /* CPPCHECK */
        hdr * hhdr = HDR(op);

        GC_ASSERT(HBLKDISPL(op) == 0); /* large block */
        /* We don't need the lock here, since we have an undisguised    */
        /* pointer.  We do need to hold the lock while we adjust        */
        /* mark bits.                                                   */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case    */
                /* because the counter could be updated before locking. */
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
      }
    }
    return op;
}

/* Allocate lb bytes of pointerful, traced, but not collectible data.   */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
  return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
}

#ifdef GC_ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectible data    */
  /* This is normally roughly equivalent to the system malloc.          */
  /* But it may be useful if malloc is redefined.                       */
  GC_API GC_ATTR_MALLOC void * GC_CALL
        GC_malloc_atomic_uncollectable(size_t lb)
  {
    return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
  }
#endif /* GC_ATOMIC_UNCOLLECTABLE */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

# ifndef MSWINCE
#   include <errno.h>
# endif

  /* Avoid unnecessary nested procedure calls here, by #defining some   */
  /* malloc replacements.  Otherwise we end up saving a meaningless     */
  /* return address in the object.  It also speeds things up, but it is */
  /* admittedly quite ugly.                                             */
# define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)

# if defined(CPPCHECK)
#   define REDIRECT_MALLOC_F GC_malloc /* e.g. */
# else
#   define REDIRECT_MALLOC_F REDIRECT_MALLOC
# endif

  void * malloc(size_t lb)
  {
    /* It might help to manually inline the GC_malloc call here.        */
    /* But any decent compiler should reduce the extra procedure call   */
    /* to at most a jump instruction in this case.                      */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /* Thread initialization can call malloc before we are ready for. */
      /* It is not clear that this is enough to help matters.           */
      /* The thread implementation may well call malloc at other        */
      /* inopportune times.                                             */
      if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
#   endif
    return (void *)REDIRECT_MALLOC_F(lb);
  }

# if defined(GC_LINUX_THREADS)
#   ifdef HAVE_LIBPTHREAD_SO
      STATIC ptr_t GC_libpthread_start = NULL;
      STATIC ptr_t GC_libpthread_end = NULL;
#   endif
    STATIC ptr_t GC_libld_start = NULL;
    STATIC ptr_t GC_libld_end = NULL;

    STATIC void GC_init_lib_bounds(void)
    {
      IF_CANCEL(int cancel_state;)

      DISABLE_CANCEL(cancel_state);
      GC_init(); /* if not called yet */
#     ifdef HAVE_LIBPTHREAD_SO
        if (!GC_text_mapping("libpthread-",
                             &GC_libpthread_start, &GC_libpthread_end)) {
          WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
          /* This might still work with some versions of libpthread,    */
          /* so we do not abort.                                        */
        }
#     endif
      if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
          WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
      }
      RESTORE_CANCEL(cancel_state);
    }
# endif /* GC_LINUX_THREADS */

  void * calloc(size_t n, size_t lb)
  {
    if (EXPECT((lb | n) > GC_SQRT_SIZE_MAX, FALSE) /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
      return (*GC_get_oom_fn())(GC_SIZE_MAX); /* n*lb overflow */
#   if defined(GC_LINUX_THREADS)
      /* The linker may allocate some memory that is only pointed to by */
      /* mmapped thread stacks.  Make sure it is not collectible.       */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);

        /* This test does not need to ensure memory visibility, since   */
        /* the bounds will be set when/if we create another thread.     */
        if (!EXPECT(lib_bounds_set, TRUE)) {
          GC_init_lib_bounds();
          lib_bounds_set = TRUE;
        }
        if (((word)caller >= (word)GC_libld_start
             && (word)caller < (word)GC_libld_end)
#           ifdef HAVE_LIBPTHREAD_SO
              || ((word)caller >= (word)GC_libpthread_start
                  && (word)caller < (word)GC_libpthread_end)
                    /* The two ranges are actually usually adjacent,    */
                    /* so there may be a way to speed this up.          */
#           endif
           ) {
          return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
        }
      }
#   endif
    return (void *)REDIRECT_MALLOC_F(n * lb);
  }

# ifndef strdup
    char *strdup(const char *s)
    {
      size_t lb = strlen(s) + 1;
      char *result = (char *)REDIRECT_MALLOC_F(lb);

      if (EXPECT(NULL == result, FALSE)) {
        errno = ENOMEM;
        return NULL;
      }
      BCOPY(s, result, lb);
      return result;
    }
# endif /* !defined(strdup) */
 /* If strdup is macro defined, we assume that it actually calls malloc, */
 /* and thus the right thing will happen even without overriding it.     */
 /* This seems to be true on most Linux systems.                         */

# ifndef strndup
    /* This is similar to strdup().     */
    char *strndup(const char *str, size_t size)
    {
      char *copy;
      size_t len = strlen(str);
      if (EXPECT(len > size, FALSE))
        len = size;
      copy = (char *)REDIRECT_MALLOC_F(len + 1);
      if (EXPECT(NULL == copy, FALSE)) {
        errno = ENOMEM;
        return NULL;
      }
      if (EXPECT(len > 0, TRUE))
        BCOPY(str, copy, len);
      copy[len] = '\0';
      return copy;
    }
# endif /* !strndup */

# undef GC_debug_malloc_replacement

#endif /* REDIRECT_MALLOC */

/* Explicitly deallocate the object.  hhdr should correspond to p.      */
static void free_internal(void *p, hdr *hhdr)
{
  size_t sz = (size_t)(hhdr -> hb_sz); /* in bytes */
  size_t ngranules = BYTES_TO_GRANULES(sz); /* size in granules */
  int knd = hhdr -> hb_obj_kind;

  GC_bytes_freed += sz;
  if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
  if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
    struct obj_kind *ok = &GC_obj_kinds[knd];
    void **flh;

    /* It is unnecessary to clear the mark bit.  If the object is       */
    /* reallocated, it does not matter.  Otherwise, the collector will  */
    /* do it, since it is on a free list.                               */
    if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
      BZERO((word *)p + 1, sz - sizeof(word));
    }

    flh = &(ok -> ok_freelist[ngranules]);
    obj_link(p) = *flh;
    *flh = (ptr_t)p;
  } else {
    if (sz > HBLKSIZE) {
      GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
    }
    GC_freehblk(HBLKPTR(p));
  }
}

GC_API void GC_CALL GC_free(void * p)
{
    hdr *hhdr;

    if (p /* != NULL */) {
        /* CPPCHECK */
    } else {
        /* Required by ANSI.  It's not my fault ...     */
        return;
    }

#   ifdef LOG_ALLOCS
      GC_log_printf("GC_free(%p) after GC #%lu\n",
                    p, (unsigned long)GC_gc_no);
#   endif
    hhdr = HDR(p);
#   if defined(REDIRECT_MALLOC) && \
        ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
         || defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
        /* This might be called indirectly by GC_print_callers to free  */
        /* the result of backtrace_symbols.                             */
        /* For Solaris, we have to redirect malloc calls during         */
        /* initialization.  For the others, this seems to happen        */
        /* implicitly.                                                  */
        /* Don't try to deallocate that memory.                         */
        if (EXPECT(NULL == hhdr, FALSE)) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    LOCK();
    free_internal(p, hhdr);
    UNLOCK();
}

#ifdef THREADS
  GC_INNER void GC_free_inner(void * p)
  {
    GC_ASSERT(I_HOLD_LOCK());
    free_internal(p, HDR(p));
  }
#endif /* THREADS */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif

#if defined(REDIRECT_FREE) && !defined(REDIRECT_MALLOC_IN_HEADER)

# if defined(CPPCHECK)
#   define REDIRECT_FREE_F GC_free /* e.g. */
# else
#   define REDIRECT_FREE_F REDIRECT_FREE
# endif

  void free(void * p)
  {
#   ifndef IGNORE_FREE
#     if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
        /* Don't bother with initialization checks.  If nothing         */
        /* has been initialized, the check fails, and that's safe,      */
        /* since we have not allocated uncollectible objects neither.   */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since   */
        /* the bounds will be set when/if we create another thread.     */
        if (((word)caller >= (word)GC_libld_start
             && (word)caller < (word)GC_libld_end)
#           ifdef HAVE_LIBPTHREAD_SO
              || ((word)caller >= (word)GC_libpthread_start
                  && (word)caller < (word)GC_libpthread_end)
#           endif
           ) {
          GC_free(p);
          return;
        }
#     endif
      REDIRECT_FREE_F(p);
#   endif
  }
#endif /* REDIRECT_FREE */
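
The entry points implemented above are normally reached through the gc.h
macros.  The following minimal sketch is illustrative only and is not part
of the covered source; it assumes the public gc.h API (GC_INIT, GC_MALLOC,
GC_MALLOC_ATOMIC, GC_FREE, GC_get_heap_size).

    #include <stdio.h>
    #include "gc.h"

    int main(void)
    {
      int i;

      GC_INIT();                              /* initialize the collector */
      for (i = 0; i < 1000; ++i) {
        /* Pointer-containing object: scanned by the collector (NORMAL). */
        void **p = (void **)GC_MALLOC(8 * sizeof(void *));

        /* Pointer-free object: its contents are never scanned (PTRFREE). */
        p[0] = GC_MALLOC_ATOMIC(4096);

        if (i % 2 == 0)
          GC_FREE(p);   /* explicit deallocation is allowed but optional */
        /* Otherwise both objects are simply dropped and are reclaimed   */
        /* by a later collection once they become unreachable.           */
      }
      printf("heap size = %lu bytes\n", (unsigned long)GC_get_heap_size());
      return 0;
    }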