ivmai / bdwgc / build 1470

31 Mar 2023 06:11PM UTC. Coverage: 72.922% (+0.2%), up from 72.713%.

Build 1470, triggered by a push, run on travis-ci-com.

Commit (committer: ivmai):
Eliminate 'ISO C++17 does not allow register specifier' gcc warning

* tools/setjmp_t.c [__cplusplus>=201703L] (main): Do not use register
specifier for x local variable.
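
The change itself lives outside this file; for context, the guard it describes looks roughly like the sketch below. This is only an illustration: the actual declaration in tools/setjmp_t.c is not reproduced in this report, so the variable's type here is a placeholder.

    /* Illustrative only: ISO C++17 removed the register storage class,  */
    /* so the specifier is requested only in older language modes.       */
    #if defined(__cplusplus) && __cplusplus >= 201703L
      int x;           /* no register specifier under C++17 and later */
    #else
      register int x;  /* C and pre-C++17 dialects still accept it    */
    #endif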

7220 of 9901 relevant lines covered (72.92%)
10343907.65 hits per line

Source file: /malloc.c (88.71% of relevant lines covered)
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
#include "gc_inline.h" /* for GC_malloc_kind */

#include <stdio.h>
#include <string.h>

/* Allocate reclaim list for kind:      */
/* Return TRUE on success               */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}

/* Allocate a large block of size lb bytes.  The block is not cleared.  */
/* flags argument should be 0 or IGNORE_OFF_PAGE.  EXTRA_BYTES value    */
/* was already added to lb.                                             */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    lb = ROUNDUP_GRANULE_SIZE(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) {
      DCL_LOCK_STATE;
      UNLOCK(); /* just to unset GC_lock_holder */
      GC_init();
      LOCK();
    }
    /* Do our share of marking work */
        if (GC_incremental && !GC_dont_gc) {
            ENTER_GC();
            GC_collect_a_little_inner((int)n_blocks);
            EXIT_GC();
        }
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
        if (0 == h) {
            GC_merge_unmapped();
            h = GC_allochblk(lb, k, flags);
        }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
        h = GC_allochblk(lb, k, flags);
        retry = TRUE;
    }
    if (h == 0) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
        result = h -> hb_body;
    }
    return result;
}

/* Allocate a large block of size lb bytes.  Clear if appropriate.      */
/* EXTRA_BYTES were already added to lb.                                */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_alloc_large(lb, k, flags);
    if (result != NULL
          && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
        word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Fill in additional entries in GC_size_map, including the i-th one.   */
/* Note that a filled in section of the array ending at n always        */
/* has the length of at least n/4.                                      */
STATIC void GC_extend_size_map(size_t i)
{
  size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
  size_t granule_sz;
  size_t byte_sz = GRANULES_TO_BYTES(orig_granule_sz);
                        /* The size we try to preserve.         */
                        /* Close to i, unless this would        */
                        /* introduce too many distinct sizes.   */
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  size_t low_limit; /* The lowest indexed entry we initialize.  */
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    low_limit = byte_sz - (byte_sz >> 2); /* much smaller than i */
    granule_sz = orig_granule_sz;
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    granule_sz = ROUNDED_UP_GRANULES(low_limit);
    granule_sz += granule_sz >> 3;
    if (granule_sz < orig_granule_sz)
      granule_sz = orig_granule_sz;
  }

  /* For these larger sizes, we use an even number of granules.         */
  /* This makes it easier to, e.g., construct a 16-byte-aligned         */
  /* allocator even if GRANULE_BYTES is 8.                              */
  granule_sz = (granule_sz + 1) & ~1;
  if (granule_sz > MAXOBJGRANULES)
    granule_sz = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  number_of_objs = HBLK_GRANULES / granule_sz;
  GC_ASSERT(number_of_objs != 0);
  granule_sz = (HBLK_GRANULES / number_of_objs) & ~1;

  byte_sz = GRANULES_TO_BYTES(granule_sz) - EXTRA_BYTES;
                        /* We may need one extra byte; do not always    */
                        /* fill in GC_size_map[byte_sz].                */

  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = granule_sz;
}

/* Allocate lb bytes for an object of kind k.           */
/* Should not be used to directly allocate objects      */
/* that require special handling on allocation.         */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
          if (lg == 0) {
            if (!EXPECT(GC_is_initialized, TRUE)) {
              DCL_LOCK_STATE;
              UNLOCK(); /* just to unset GC_lock_holder */
              GC_init();
              LOCK();
              lg = GC_size_map[lb];
            }
            if (0 == lg) {
              GC_extend_size_map(lb);
              lg = GC_size_map[lb];
              GC_ASSERT(lg != 0);
            }
            /* Retry */
            opp = &(kind -> ok_freelist[lg]);
            op = *opp;
          }
          if (0 == op) {
            if (0 == kind -> ok_reclaim_list &&
                !GC_alloc_reclaim_list(kind))
              return NULL;
            op = GC_allocobj(lg, k);
            if (0 == op)
              return NULL;
          }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
    } else {
        size_t lb_adjusted = ADD_SLOP(lb);

        op = (ptr_t)GC_alloc_large_and_clear(lb_adjusted, k, 0 /* flags */);
        if (op != NULL)
            GC_bytes_allocd += lb_adjusted;
    }

    return op;
}

#if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
    || !defined(GC_NO_FINALIZATION)
  /* Allocate a composite object of size n bytes.  The caller           */
  /* guarantees that pointers past the first hblk are not relevant.     */
  GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
  {
    size_t lb_adjusted;
    void * op;

    GC_ASSERT(I_HOLD_LOCK());
    if (lb <= HBLKSIZE)
        return GC_generic_malloc_inner(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    if (op != NULL)
        GC_bytes_allocd += lb_adjusted;
    return op;
  }
#endif

#ifdef GC_COLLECT_AT_MALLOC
  /* Parameter to force GC at every malloc of size greater or equal to  */
  /* the given value.  This might be handy during debugging.            */
# if defined(CPPCHECK)
    size_t GC_dbg_collect_at_malloc_min_lb = 16*1024; /* e.g. */
# else
    size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
# endif
#endif

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    GC_ASSERT(k < MAXOBJKINDS);
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
          if (GC_debugging_started
#             ifndef THREADS
                || init
#             endif
             ) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
          GC_bytes_allocd += lb_rounded;
        }
        UNLOCK();
#       ifdef THREADS
          if (init && !GC_debugging_started && result != NULL) {
            /* Clear the rest (i.e. excluding the initial 2 words). */
            BZERO((word *)result + 2,
                  n_blocks * HBLKSIZE - 2 * sizeof(word));
          }
#       endif
    }
    if (0 == result) {
        return((*GC_get_oom_fn())(lb));
    } else {
        return(result);
    }
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void *op;
        void **opp;
        size_t lg;
        DCL_LOCK_STATE;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            if (k == PTRFREE) {
                *opp = obj_link(op);
            } else {
                GC_ASSERT(0 == obj_link(op)
                          || ((word)obj_link(op)
                                <= (word)GC_greatest_plausible_heap_addr
                              && (word)obj_link(op)
                                >= (word)GC_least_plausible_heap_addr));
                *opp = obj_link(op);
                obj_link(op) = 0;
            }
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
            return op;
        }
        UNLOCK();
    }

    /* We make the GC_clear_stack() call a tail one, hoping to get more */
    /* of the stack.                                                    */
    return GC_clear_stack(GC_generic_malloc(lb, k));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t lb, int k)
  {
    return GC_malloc_kind_global(lb, k);
  }
#endif

/* Allocate lb bytes of atomic (pointer-free) data.     */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
{
    return GC_malloc_kind(lb, PTRFREE);
}

/* Allocate lb bytes of composite (pointerful) data.    */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
{
    return GC_malloc_kind(lb, NORMAL);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
68,696✔
362
                                                        size_t lb, int k)
363
{
364
    void *op;
365
    DCL_LOCK_STATE;
366

367
    GC_ASSERT(k < MAXOBJKINDS);
68,696✔
368
    if (SMALL_OBJ(lb)) {
137,392✔
369
        void **opp;
370
        size_t lg;
371

372
        GC_DBG_COLLECT_AT_MALLOC(lb);
373
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
68,696✔
374
                  /* We don't need the extra byte, since this won't be  */
375
                  /* collected anyway.                                  */
376
        LOCK();
68,696✔
377
        lg = GC_size_map[lb];
68,696✔
378
        opp = &GC_obj_kinds[k].ok_freelist[lg];
68,696✔
379
        op = *opp;
68,696✔
380
        if (EXPECT(op != NULL, TRUE)) {
68,696✔
381
            *opp = obj_link(op);
68,323✔
382
            obj_link(op) = 0;
68,323✔
383
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
68,323✔
384
            /* Mark bit was already set on free list.  It will be       */
385
            /* cleared only temporarily during a collection, as a       */
386
            /* result of the normal free list mark bit clearing.        */
387
            GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
68,323✔
388
            UNLOCK();
68,323✔
389
        } else {
390
            UNLOCK();
373✔
391
            op = GC_generic_malloc(lb, k);
373✔
392
            /* For small objects, the free lists are completely marked. */
393
        }
394
        GC_ASSERT(0 == op || GC_is_marked(op));
68,696✔
395
    } else {
396
      op = GC_generic_malloc(lb, k);
×
397
      if (op /* != NULL */) { /* CPPCHECK */
×
398
        hdr * hhdr = HDR(op);
×
399

400
        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
×
401
        /* We don't need the lock here, since we have an undisguised    */
402
        /* pointer.  We do need to hold the lock while we adjust        */
403
        /* mark bits.                                                   */
404
        LOCK();
×
405
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
×
406
#       ifndef THREADS
407
          GC_ASSERT(hhdr -> hb_n_marks == 0);
408
                /* This is not guaranteed in the multi-threaded case    */
409
                /* because the counter could be updated before locking. */
410
#       endif
411
        hhdr -> hb_n_marks = 1;
×
412
        UNLOCK();
×
413
      }
414
    }
415
    return op;
68,696✔
416
}
417

418
/* Allocate lb bytes of pointerful, traced, but not collectible data.   */
419
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
68,612✔
420
{
421
  return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
68,612✔
422
}
423

424
#ifdef GC_ATOMIC_UNCOLLECTABLE
425
  /* Allocate lb bytes of pointer-free, untraced, uncollectible data    */
426
  /* This is normally roughly equivalent to the system malloc.          */
427
  /* But it may be useful if malloc is redefined.                       */
428
  GC_API GC_ATTR_MALLOC void * GC_CALL
429
        GC_malloc_atomic_uncollectable(size_t lb)
×
430
  {
431
    return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
×
432
  }
433
#endif /* GC_ATOMIC_UNCOLLECTABLE */
434

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

# ifndef MSWINCE
#  include <errno.h>
# endif

  /* Avoid unnecessary nested procedure calls here, by #defining some   */
  /* malloc replacements.  Otherwise we end up saving a meaningless     */
  /* return address in the object.  It also speeds things up, but it is */
  /* admittedly quite ugly.                                             */
# define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)

# if defined(CPPCHECK)
#   define REDIRECT_MALLOC_F GC_malloc /* e.g. */
# else
#   define REDIRECT_MALLOC_F REDIRECT_MALLOC
# endif

  void * malloc(size_t lb)
  {
    /* It might help to manually inline the GC_malloc call here.        */
    /* But any decent compiler should reduce the extra procedure call   */
    /* to at most a jump instruction in this case.                      */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /* Thread initialization can call malloc before we are ready for. */
      /* It is not clear that this is enough to help matters.           */
      /* The thread implementation may well call malloc at other        */
      /* inopportune times.                                             */
      if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
#   endif
    return (void *)REDIRECT_MALLOC_F(lb);
  }

# if defined(GC_LINUX_THREADS)
    STATIC ptr_t GC_libpthread_start = 0;
    STATIC ptr_t GC_libpthread_end = 0;
    STATIC ptr_t GC_libld_start = 0;
    STATIC ptr_t GC_libld_end = 0;

    STATIC void GC_init_lib_bounds(void)
    {
      IF_CANCEL(int cancel_state;)

      if (GC_libpthread_start != 0) return;
      DISABLE_CANCEL(cancel_state);
      GC_init(); /* if not called yet */
      if (!GC_text_mapping("libpthread-",
                           &GC_libpthread_start, &GC_libpthread_end)) {
        /* Some libc implementations like bionic, musl and glibc 2.34   */
        /* do not have libpthread.so because the pthreads-related code  */
        /* is located in libc.so, thus potential calloc calls from such */
        /* code are forwarded to real (libc) calloc without any special */
        /* handling on the libgc side.  Checking glibc version at       */
        /* compile time to turn off the warning seems to be fine.       */
        /* TODO: Remove GC_text_mapping() call for this case.           */
#       if defined(__GLIBC__) \
           && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 34))
          WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
          /* This might still work with some versions of libpthread,      */
          /* so we do not abort.                                          */
#       endif
          /* Generate message only once:                                  */
            GC_libpthread_start = (ptr_t)1;
      }
      if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
          WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
      }
      RESTORE_CANCEL(cancel_state);
    }
# endif /* GC_LINUX_THREADS */

  void * calloc(size_t n, size_t lb)
  {
    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
      return (*GC_get_oom_fn())(GC_SIZE_MAX); /* n*lb overflow */
#   if defined(GC_LINUX_THREADS)
      /* libpthread allocated some memory that is only pointed to by    */
      /* mmapped thread stacks.  Make sure it is not collectible.       */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since   */
        /* the bounds will be set when/if we create another thread.     */
        if (!EXPECT(lib_bounds_set, TRUE)) {
          GC_init_lib_bounds();
          lib_bounds_set = TRUE;
        }
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end))
          return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
        /* The two ranges are actually usually adjacent, so there may   */
        /* be a way to speed this up.                                   */
      }
#   endif
    return (void *)REDIRECT_MALLOC_F(n * lb);
  }

# ifndef strdup
    char *strdup(const char *s)
    {
      size_t lb = strlen(s) + 1;
      char *result = (char *)REDIRECT_MALLOC_F(lb);
      if (result == 0) {
        errno = ENOMEM;
        return 0;
      }
      BCOPY(s, result, lb);
      return result;
    }
# endif /* !defined(strdup) */
 /* If strdup is macro defined, we assume that it actually calls malloc, */
 /* and thus the right thing will happen even without overriding it.     */
 /* This seems to be true on most Linux systems.                         */

# ifndef strndup
    /* This is similar to strdup().     */
    char *strndup(const char *str, size_t size)
    {
      char *copy;
      size_t len = strlen(str);
      if (len > size)
        len = size;
      copy = (char *)REDIRECT_MALLOC_F(len + 1);
      if (copy == NULL) {
        errno = ENOMEM;
        return NULL;
      }
      if (EXPECT(len > 0, TRUE))
        BCOPY(str, copy, len);
      copy[len] = '\0';
      return copy;
    }
# endif /* !strndup */

# undef GC_debug_malloc_replacement

#endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p.                           */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* In bytes */
    size_t ngranules;   /* sz in granules */
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p /* != NULL */) {
        /* CPPCHECK */
    } else {
        /* Required by ANSI.  It's not my fault ...     */
        return;
    }

#   ifdef LOG_ALLOCS
      GC_log_printf("GC_free(%p) after GC #%lu\n",
                    p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
         || defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
        /* This might be called indirectly by GC_print_callers to free  */
        /* the result of backtrace_symbols.                             */
        /* For Solaris, we have to redirect malloc calls during         */
        /* initialization.  For the others, this seems to happen        */
        /* implicitly.                                                  */
        /* Don't try to deallocate that memory.                         */
        if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = (size_t)hhdr->hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        void **flh;

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
                /* It's unnecessary to clear the mark bit.  If the      */
                /* object is reallocated, it doesn't matter.  O.w. the  */
                /* collector will do it, since it's on a free list.     */
        if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
          GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}

/* Explicitly deallocate an object p when we already hold lock.         */
/* Only used for internally allocated objects, so we can take some      */
/* shortcuts.                                                           */
#ifdef THREADS
  GC_INNER void GC_free_inner(void * p)
  {
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* bytes */
    size_t ngranules;  /* sz in granules */
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = (size_t)hhdr->hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        void ** flh;

        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
          GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
  }
#endif /* THREADS */

#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif

#if defined(REDIRECT_FREE) && !defined(REDIRECT_MALLOC_IN_HEADER)

# if defined(CPPCHECK)
#   define REDIRECT_FREE_F GC_free /* e.g. */
# else
#   define REDIRECT_FREE_F REDIRECT_FREE
# endif

  void free(void * p)
  {
#   ifndef IGNORE_FREE
#     if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
        /* Don't bother with initialization checks.  If nothing         */
        /* has been initialized, the check fails, and that's safe,      */
        /* since we have not allocated uncollectible objects neither.   */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since   */
        /* the bounds will be set when/if we create another thread.     */
        if (((word)caller >= (word)GC_libpthread_start
             && (word)caller < (word)GC_libpthread_end)
            || ((word)caller >= (word)GC_libld_start
                && (word)caller < (word)GC_libld_end)) {
          GC_free(p);
          return;
        }
#     endif
      REDIRECT_FREE_F(p);
#   endif
  }
#endif /* REDIRECT_FREE */
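
For readers tracing the entry points above, here is a minimal usage sketch. It is not part of malloc.c; it assumes only the public gc.h API (GC_INIT, GC_MALLOC, GC_MALLOC_ATOMIC, GC_FREE), which in a non-debug build expand to the GC_malloc, GC_malloc_atomic and GC_free functions defined in this file.

    /* Minimal sketch: exercises the collectible, atomic and explicit-free */
    /* paths covered above.  Build against the collector, e.g. cc demo.c -lgc. */
    #include <assert.h>
    #include <gc.h>

    int main(void)
    {
        GC_INIT();                            /* initialize the collector     */
        int **p = GC_MALLOC(10 * sizeof *p);  /* traced, collectible memory   */
        char *s = GC_MALLOC_ATOMIC(64);       /* pointer-free (atomic) memory */
        assert(p != NULL && s != NULL);
        p[0] = GC_MALLOC(sizeof **p);         /* stays reachable through p    */
        GC_FREE(s);                           /* explicit free is optional    */
        return 0;
    }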