ivmai / bdwgc / build 1485 (push, via travis-ci-com)

17 Apr 2023 05:30PM UTC, coverage: 76.519% (+0.02% from 76.502%)
7772 of 10157 relevant lines covered (76.52%)
8798128.86 hits per line

Committer: ivmai

Prevent 'function should return a value' BCC error in CMake script

* CMakeLists.txt [enable_werror && BORLAND && enable_threads]: Pass
  "/w-rvl" to add_compile_options; add comment.

Source File: /malloc.c (89.83% covered)

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <string.h>

/* Allocate reclaim list for the kind.  Returns TRUE on success.        */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *ok)
{
    struct hblk ** result;

    GC_ASSERT(I_HOLD_LOCK());
    result = (struct hblk **)GC_scratch_alloc(
                                (MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (EXPECT(NULL == result, FALSE)) return FALSE;

    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    ok -> ok_reclaim_list = result;
    return TRUE;
}

GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags,
                              size_t align_m1)
{
    struct hblk * h;
    size_t n_blocks; /* includes alignment */
    ptr_t result = NULL;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    lb = ROUNDUP_GRANULE_SIZE(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(SIZET_SAT_ADD(lb, align_m1));
    if (!EXPECT(GC_is_initialized, TRUE)) {
      UNLOCK(); /* just to unset GC_lock_holder */
      GC_init();
      LOCK();
    }
    /* Do our share of marking work.    */
    if (GC_incremental && !GC_dont_gc) {
            ENTER_GC();
            GC_collect_a_little_inner((int)n_blocks);
            EXIT_GC();
    }

    h = GC_allochblk(lb, k, flags, align_m1);
#   ifdef USE_MUNMAP
        if (NULL == h) {
            GC_merge_unmapped();
            h = GC_allochblk(lb, k, flags, align_m1);
        }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, flags, retry)) {
        h = GC_allochblk(lb, k, flags, align_m1);
        retry = TRUE;
    }
    if (EXPECT(h != NULL, TRUE)) {
        GC_bytes_allocd += lb;
        if (lb > HBLKSIZE) {
            GC_large_allocd_bytes += HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb);
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
        result = h -> hb_body;
        GC_ASSERT(((word)result & align_m1) == 0);
    }
    return result;
}

/* Allocate a large block of size lb bytes.  Clear if appropriate.      */
/* EXTRA_BYTES were already added to lb.  Update GC_bytes_allocd.       */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_alloc_large(lb, k, flags, 0 /* align_m1 */);
    if (EXPECT(result != NULL, TRUE)
          && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb));
    }
    return result;
}

/* Fill in additional entries in GC_size_map, including the i-th one.   */
/* Note that a filled in section of the array ending at n always        */
/* has the length of at least n/4.                                      */
STATIC void GC_extend_size_map(size_t i)
{
  size_t orig_granule_sz = ALLOC_REQUEST_GRANS(i);
  size_t granule_sz;
  size_t byte_sz = GRANULES_TO_BYTES(orig_granule_sz);
                        /* The size we try to preserve.         */
                        /* Close to i, unless this would        */
                        /* introduce too many distinct sizes.   */
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  size_t low_limit; /* The lowest indexed entry we initialize.  */
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    low_limit = byte_sz - (byte_sz >> 2); /* much smaller than i */
    granule_sz = orig_granule_sz;
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    granule_sz = ALLOC_REQUEST_GRANS(low_limit);
    granule_sz += granule_sz >> 3;
    if (granule_sz < orig_granule_sz)
      granule_sz = orig_granule_sz;
  }

  /* For these larger sizes, we use an even number of granules.         */
  /* This makes it easier to, e.g., construct a 16-byte-aligned         */
  /* allocator even if GRANULE_BYTES is 8.                              */
  granule_sz = (granule_sz + 1) & ~1;
  if (granule_sz > MAXOBJGRANULES)
    granule_sz = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  number_of_objs = HBLK_GRANULES / granule_sz;
  GC_ASSERT(number_of_objs != 0);
  granule_sz = (HBLK_GRANULES / number_of_objs) & ~1;

  byte_sz = GRANULES_TO_BYTES(granule_sz) - EXTRA_BYTES;
                        /* We may need one extra byte; do not always    */
                        /* fill in GC_size_map[byte_sz].                */

  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = granule_sz;
}

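The step "granule_sz = (granule_sz + 1) & ~1;" above is what forces an even granule count: an odd count is rounded up by one and an even count is left unchanged. A tiny standalone illustration of the bit trick, separate from the collector source:

#include <assert.h>

int main(void)
{
  /* (n + 1) & ~1 rounds odd counts up to the next even value */
  /* and leaves even counts unchanged.                        */
  assert(((5 + 1) & ~1) == 6);
  assert(((7 + 1) & ~1) == 8);
  assert(((8 + 1) & ~1) == 8);
  return 0;
}
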
STATIC void * GC_generic_malloc_inner_small(size_t lb, int k)
{
  struct obj_kind *ok = &GC_obj_kinds[k];
  size_t lg = GC_size_map[lb];
  void ** opp = &(ok -> ok_freelist[lg]);
  void *op = *opp;

  GC_ASSERT(I_HOLD_LOCK());
  if (EXPECT(NULL == op, FALSE)) {
    if (lg == 0) {
      if (!EXPECT(GC_is_initialized, TRUE)) {
        UNLOCK(); /* just to unset GC_lock_holder */
        GC_init();
        LOCK();
        lg = GC_size_map[lb];
      }
      if (0 == lg) {
        GC_extend_size_map(lb);
        lg = GC_size_map[lb];
        GC_ASSERT(lg != 0);
      }
      /* Retry */
      opp = &(ok -> ok_freelist[lg]);
      op = *opp;
    }
    if (NULL == op) {
      if (NULL == ok -> ok_reclaim_list
          && !GC_alloc_reclaim_list(ok))
        return NULL;
      op = GC_allocobj(lg, k);
      if (NULL == op) return NULL;
    }
  }
  *opp = obj_link(op);
  obj_link(op) = NULL;
  GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  return op;
}

GC_INNER void * GC_generic_malloc_inner(size_t lb, int k, unsigned flags)
{
    size_t lb_adjusted;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        return GC_generic_malloc_inner_small(lb, k);
    }

#   if MAX_EXTRA_BYTES > 0
      if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
        /* No need to add EXTRA_BYTES.  */
        lb_adjusted = lb;
      } else
#   endif
    /* else */ {
      lb_adjusted = ADD_EXTRA_BYTES(lb);
    }
    return GC_alloc_large_and_clear(lb_adjusted, k, flags);
}

#ifdef GC_COLLECT_AT_MALLOC
  /* Parameter to force GC at every malloc of size greater or equal to  */
  /* the given value.  This might be handy during debugging.            */
# if defined(CPPCHECK)
    size_t GC_dbg_collect_at_malloc_min_lb = 16*1024; /* e.g. */
# else
    size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
# endif
#endif

GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, unsigned flags,
                                          size_t align_m1)
{
    void * result;

    GC_ASSERT(k < MAXOBJKINDS);
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb) && EXPECT(align_m1 < GRANULE_BYTES, TRUE)) {
        LOCK();
        result = GC_generic_malloc_inner_small(lb, k);
        UNLOCK();
    } else {
#       ifdef THREADS
          size_t lg;
#       endif
        size_t lb_rounded;
        GC_bool init;

#       if MAX_EXTRA_BYTES > 0
          if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
            /* No need to add EXTRA_BYTES.      */
            lb_rounded = ROUNDUP_GRANULE_SIZE(lb);
#           ifdef THREADS
              lg = BYTES_TO_GRANULES(lb_rounded);
#           endif
          } else
#       endif
        /* else */ {
#         ifndef THREADS
            size_t lg; /* CPPCHECK */
#         endif

          if (EXPECT(0 == lb, FALSE)) lb = 1;
          lg = ALLOC_REQUEST_GRANS(lb);
          lb_rounded = GRANULES_TO_BYTES(lg);
        }

        init = GC_obj_kinds[k].ok_init;
        if (EXPECT(align_m1 < GRANULE_BYTES, TRUE)) {
          align_m1 = 0;
        } else if (align_m1 < HBLKSIZE) {
          align_m1 = HBLKSIZE - 1;
        }
        LOCK();
        result = GC_alloc_large(lb_rounded, k, flags, align_m1);
        if (EXPECT(result != NULL, TRUE)) {
          if (GC_debugging_started
#             ifndef THREADS
                || init
#             endif
             ) {
            BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_rounded));
          } else {
#           ifdef THREADS
              GC_ASSERT(GRANULES_TO_WORDS(lg) >= 2);
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
                ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
        }
        UNLOCK();
#       ifdef THREADS
          if (init && !GC_debugging_started && result != NULL) {
            /* Clear the rest (i.e. excluding the initial 2 words). */
            BZERO((word *)result + 2,
                  HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_rounded) - 2 * sizeof(word));
          }
#       endif
    }
    if (EXPECT(NULL == result, FALSE))
      result = (*GC_get_oom_fn())(lb); /* might be misaligned */
    return result;
}

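When every allocation attempt above fails, GC_generic_malloc_aligned hands the request to the function returned by GC_get_oom_fn() instead of returning NULL directly. A minimal sketch of installing such a handler through the public gc.h interface (the handler name and its do-nothing policy are assumptions made for illustration):

#include "gc.h"

/* Invoked by the collector when it cannot satisfy a request of the     */
/* given size; whatever it returns becomes the result of the failed     */
/* allocation call.                                                     */
static void * GC_CALLBACK my_oom_handler(size_t bytes_requested)
{
  (void)bytes_requested;
  /* A real handler might release caches, log, or abort; returning NULL */
  /* simply propagates the failure to the caller.                       */
  return NULL;
}

void install_oom_handler(void) /* call once, after GC_INIT() */
{
  GC_set_oom_fn(my_oom_handler);
}
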
GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    return GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
{
    GC_ASSERT(k < MAXOBJKINDS);
    if (SMALL_OBJ(lb)) {
        void *op;
        void **opp;
        size_t lg;

        GC_DBG_COLLECT_AT_MALLOC(lb);
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            if (k == PTRFREE) {
                *opp = obj_link(op);
            } else {
                GC_ASSERT(0 == obj_link(op)
                          || ((word)obj_link(op)
                                < (word)GC_greatest_plausible_heap_addr
                              && (word)obj_link(op)
                                >= (word)GC_least_plausible_heap_addr));
                *opp = obj_link(op);
                obj_link(op) = 0;
            }
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            UNLOCK();
            return op;
        }
        UNLOCK();
    }

    /* We make the GC_clear_stack() call a tail one, hoping to get more */
    /* of the stack.                                                    */
    return GC_clear_stack(GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0));
}

#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t lb, int k)
  {
    return GC_malloc_kind_global(lb, k);
  }
#endif

/* Allocate lb bytes of atomic (pointer-free) data.     */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
{
    return GC_malloc_kind(lb, PTRFREE);
}

/* Allocate lb bytes of composite (pointerful) data.    */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
{
    return GC_malloc_kind(lb, NORMAL);
}

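GC_malloc and GC_malloc_atomic, usually reached through the GC_MALLOC and GC_MALLOC_ATOMIC macros of gc.h, are the two basic entry points for client code. A minimal usage sketch (separate client program, not part of malloc.c):

#include "gc.h"

struct node { struct node *next; int value; };

int main(void)
{
  GC_INIT();                       /* initialize the collector */

  /* Traced ("composite") object: its contents are scanned for pointers */
  /* and, for the NORMAL kind, the memory is returned cleared.          */
  struct node *n = GC_MALLOC(sizeof(struct node));
  n -> value = 42;

  /* Pointer-free ("atomic") object: never scanned and not cleared. */
  char *buf = GC_MALLOC_ATOMIC(4096);
  buf[0] = '\0';

  /* No explicit deallocation needed; unreachable objects are reclaimed. */
  return 0;
}
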
GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
                                                        size_t lb, int k)
{
    void *op;
    size_t lb_orig = lb;

    GC_ASSERT(k < MAXOBJKINDS);
    if (EXTRA_BYTES != 0 && EXPECT(lb != 0, TRUE)) lb--;
                /* We do not need the extra byte, since this will   */
                /* not be collected anyway.                         */

    if (SMALL_OBJ(lb)) {
        void **opp;
        size_t lg;

        if (EXPECT(get_have_errors(), FALSE))
          GC_print_all_errors();
        GC_INVOKE_FINALIZERS();
        GC_DBG_COLLECT_AT_MALLOC(lb_orig);
        LOCK();
        lg = GC_size_map[lb];
        opp = &GC_obj_kinds[k].ok_freelist[lg];
        op = *opp;
        if (EXPECT(op != NULL, TRUE)) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
            /* Mark bit was already set on free list.  It will be       */
            /* cleared only temporarily during a collection, as a       */
            /* result of the normal free list mark bit clearing.        */
            GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
        } else {
            op = GC_generic_malloc_inner_small(lb, k);
            if (NULL == op) {
              GC_oom_func oom_fn = GC_oom_fn;
              UNLOCK();
              return (*oom_fn)(lb_orig);
            }
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(GC_is_marked(op));
        UNLOCK();
    } else {
      op = GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
      if (op /* != NULL */) { /* CPPCHECK */
        hdr * hhdr = HDR(op);

        GC_ASSERT(HBLKDISPL(op) == 0); /* large block */
        /* We don't need the lock here, since we have an undisguised    */
        /* pointer.  We do need to hold the lock while we adjust        */
        /* mark bits.                                                   */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case    */
                /* because the counter could be updated before locking. */
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
      }
    }
    return op;
}

/* Allocate lb bytes of pointerful, traced, but not collectible data.   */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
  return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
}

#ifdef GC_ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointer-free, untraced, uncollectible data    */
  /* This is normally roughly equivalent to the system malloc.          */
  /* But it may be useful if malloc is redefined.                       */
  GC_API GC_ATTR_MALLOC void * GC_CALL
        GC_malloc_atomic_uncollectable(size_t lb)
  {
    return GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);
  }
#endif /* GC_ATOMIC_UNCOLLECTABLE */

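Uncollectable objects (the UNCOLLECTABLE and AUNCOLLECTABLE kinds above) are never reclaimed by a collection; the pointerful variant is still traced, so it can keep ordinary collectible objects alive. They therefore have to be released explicitly. A small illustrative fragment of client code using the corresponding gc.h macros:

#include "gc.h"

void example_uncollectable(void)
{
  /* A table that behaves like a root: scanned for pointers, but never */
  /* collected out from under us.                                      */
  void **table = GC_MALLOC_UNCOLLECTABLE(64 * sizeof(void *));

  table[0] = GC_MALLOC(128);   /* the pointee stays reachable via the table */

  /* ... */

  GC_FREE(table);              /* explicit deallocation is required */
}
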
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)

# ifndef MSWINCE
#   include <errno.h>
# endif

  /* Avoid unnecessary nested procedure calls here, by #defining some   */
  /* malloc replacements.  Otherwise we end up saving a meaningless     */
  /* return address in the object.  It also speeds things up, but it is */
  /* admittedly quite ugly.                                             */
# define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)

# if defined(CPPCHECK)
#   define REDIRECT_MALLOC_F GC_malloc /* e.g. */
# else
#   define REDIRECT_MALLOC_F REDIRECT_MALLOC
# endif

  void * malloc(size_t lb)
  {
    /* It might help to manually inline the GC_malloc call here.        */
    /* But any decent compiler should reduce the extra procedure call   */
    /* to at most a jump instruction in this case.                      */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /* Thread initialization can call malloc before we are ready for. */
      /* It is not clear that this is enough to help matters.           */
      /* The thread implementation may well call malloc at other        */
      /* inopportune times.                                             */
      if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
#   endif
    return (void *)REDIRECT_MALLOC_F(lb);
  }

# if defined(GC_LINUX_THREADS)
#   ifdef HAVE_LIBPTHREAD_SO
      STATIC ptr_t GC_libpthread_start = NULL;
      STATIC ptr_t GC_libpthread_end = NULL;
#   endif
    STATIC ptr_t GC_libld_start = NULL;
    STATIC ptr_t GC_libld_end = NULL;

    STATIC void GC_init_lib_bounds(void)
    {
      IF_CANCEL(int cancel_state;)

      DISABLE_CANCEL(cancel_state);
      GC_init(); /* if not called yet */
#     ifdef HAVE_LIBPTHREAD_SO
        if (!GC_text_mapping("libpthread-",
                             &GC_libpthread_start, &GC_libpthread_end)) {
          WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
          /* This might still work with some versions of libpthread,    */
          /* so we do not abort.                                        */
        }
#     endif
      if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
          WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
      }
      RESTORE_CANCEL(cancel_state);
    }
# endif /* GC_LINUX_THREADS */

  void * calloc(size_t n, size_t lb)
  {
    if (EXPECT((lb | n) > GC_SQRT_SIZE_MAX, FALSE) /* fast initial test */
        && lb && n > GC_SIZE_MAX / lb)
      return (*GC_get_oom_fn())(GC_SIZE_MAX); /* n*lb overflow */
#   if defined(GC_LINUX_THREADS)
      /* The linker may allocate some memory that is only pointed to by */
      /* mmapped thread stacks.  Make sure it is not collectible.       */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);

        /* This test does not need to ensure memory visibility, since   */
        /* the bounds will be set when/if we create another thread.     */
        if (!EXPECT(lib_bounds_set, TRUE)) {
          GC_init_lib_bounds();
          lib_bounds_set = TRUE;
        }
        if (((word)caller >= (word)GC_libld_start
             && (word)caller < (word)GC_libld_end)
#           ifdef HAVE_LIBPTHREAD_SO
              || ((word)caller >= (word)GC_libpthread_start
                  && (word)caller < (word)GC_libpthread_end)
                    /* The two ranges are actually usually adjacent,    */
                    /* so there may be a way to speed this up.          */
#           endif
           ) {
          return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
        }
      }
#   endif
    return (void *)REDIRECT_MALLOC_F(n * lb);
  }

# ifndef strdup
    char *strdup(const char *s)
    {
      size_t lb = strlen(s) + 1;
      char *result = (char *)REDIRECT_MALLOC_F(lb);

      if (EXPECT(NULL == result, FALSE)) {
        errno = ENOMEM;
        return NULL;
      }
      BCOPY(s, result, lb);
      return result;
    }
# endif /* !defined(strdup) */
 /* If strdup is macro defined, we assume that it actually calls malloc, */
 /* and thus the right thing will happen even without overriding it.     */
 /* This seems to be true on most Linux systems.                         */

# ifndef strndup
    /* This is similar to strdup().     */
    char *strndup(const char *str, size_t size)
    {
      char *copy;
      size_t len = strlen(str);
      if (EXPECT(len > size, FALSE))
        len = size;
      copy = (char *)REDIRECT_MALLOC_F(len + 1);
      if (EXPECT(NULL == copy, FALSE)) {
        errno = ENOMEM;
        return NULL;
      }
      if (EXPECT(len > 0, TRUE))
        BCOPY(str, copy, len);
      copy[len] = '\0';
      return copy;
    }
# endif /* !strndup */

# undef GC_debug_malloc_replacement

#endif /* REDIRECT_MALLOC */

/* Explicitly deallocate the object.  hhdr should correspond to p.      */
static void free_internal(void *p, hdr *hhdr)
{
  size_t sz = (size_t)(hhdr -> hb_sz); /* in bytes */
  size_t ngranules = BYTES_TO_GRANULES(sz); /* size in granules */
  int k = hhdr -> hb_obj_kind;

  GC_bytes_freed += sz;
  if (IS_UNCOLLECTABLE(k)) GC_non_gc_bytes -= sz;
  if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
    struct obj_kind *ok = &GC_obj_kinds[k];
    void **flh;

    /* It is unnecessary to clear the mark bit.  If the object is       */
    /* reallocated, it does not matter.  Otherwise, the collector will  */
    /* do it, since it is on a free list.                               */
    if (ok -> ok_init && EXPECT(sz > sizeof(word), TRUE)) {
      BZERO((word *)p + 1, sz - sizeof(word));
    }

    flh = &(ok -> ok_freelist[ngranules]);
    obj_link(p) = *flh;
    *flh = (ptr_t)p;
  } else {
    if (sz > HBLKSIZE) {
      GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
    }
    GC_freehblk(HBLKPTR(p));
  }
}

GC_API void GC_CALL GC_free(void * p)
{
    hdr *hhdr;

    if (p /* != NULL */) {
        /* CPPCHECK */
    } else {
        /* Required by ANSI.  It's not my fault ...     */
        return;
    }

#   ifdef LOG_ALLOCS
      GC_log_printf("GC_free(%p) after GC #%lu\n",
                    p, (unsigned long)GC_gc_no);
#   endif
    hhdr = HDR(p);
#   if defined(REDIRECT_MALLOC) && \
        ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
         || defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
        /* This might be called indirectly by GC_print_callers to free  */
        /* the result of backtrace_symbols.                             */
        /* For Solaris, we have to redirect malloc calls during         */
        /* initialization.  For the others, this seems to happen        */
        /* implicitly.                                                  */
        /* Don't try to deallocate that memory.                         */
        if (EXPECT(NULL == hhdr, FALSE)) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    LOCK();
    free_internal(p, hhdr);
    UNLOCK();
}

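GC_free is optional for ordinary collectible objects, but explicitly freeing one returns its space to the appropriate free list immediately instead of waiting for a collection, and (as the code above shows) passing NULL is accepted as a no-op, matching the C library contract for free(). An illustrative client fragment:

#include "gc.h"

void example_explicit_free(void)
{
  void *tmp = GC_malloc(1 << 16);  /* a sizable temporary buffer */

  /* ... use tmp ... */

  GC_free(tmp);   /* optional, but recycles the space immediately */
  GC_free(NULL);  /* accepted and ignored, as in the code above   */
}
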
#ifdef THREADS
652
  GC_INNER void GC_free_inner(void * p)
93,138✔
653
  {
654
    GC_ASSERT(I_HOLD_LOCK());
93,138✔
655
    free_internal(p, HDR(p));
93,138✔
656
  }
93,138✔
657
#endif /* THREADS */
658

659
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
660
# define REDIRECT_FREE GC_free
661
#endif
662

663
#if defined(REDIRECT_FREE) && !defined(REDIRECT_MALLOC_IN_HEADER)
664

665
# if defined(CPPCHECK)
666
#   define REDIRECT_FREE_F GC_free /* e.g. */
667
# else
668
#   define REDIRECT_FREE_F REDIRECT_FREE
669
# endif
670

671
  void free(void * p)
672
  {
673
#   ifndef IGNORE_FREE
674
#     if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
675
        /* Don't bother with initialization checks.  If nothing         */
676
        /* has been initialized, the check fails, and that's safe,      */
677
        /* since we have not allocated uncollectible objects neither.   */
678
        ptr_t caller = (ptr_t)__builtin_return_address(0);
679
        /* This test does not need to ensure memory visibility, since   */
680
        /* the bounds will be set when/if we create another thread.     */
681
        if (((word)caller >= (word)GC_libld_start
682
             && (word)caller < (word)GC_libld_end)
683
#           ifdef HAVE_LIBPTHREAD_SO
684
              || ((word)caller >= (word)GC_libpthread_start
685
                  && (word)caller < (word)GC_libpthread_end)
686
#           endif
687
           ) {
688
          GC_free(p);
689
          return;
690
        }
691
#     endif
692
      REDIRECT_FREE_F(p);
693
#   endif
694
  }
695
#endif /* REDIRECT_FREE */
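
The malloc, calloc, strdup, strndup and free replacements above are compiled in only when the collector is built with malloc redirection, i.e. with REDIRECT_MALLOC defined (commonly to GC_malloc) and, unless overridden, REDIRECT_FREE defaulting to GC_free as set a few lines above. Under such a build, ordinary libc-style calls in client code land in this file; a hedged sketch, assuming that configuration:

#include <stdlib.h>
#include <string.h>

void example_redirected(void)
{
  /* These calls resolve to the malloc(), calloc(), strdup() and free() */
  /* replacements defined in this file, so the memory comes from the    */
  /* collector's heap.                                                  */
  char *a = malloc(100);
  char *b = calloc(10, 20);
  char *c = strdup("hello");

  free(a);           /* routed to REDIRECT_FREE_F, i.e. GC_free here */
  (void)b; (void)c;  /* or simply dropped and reclaimed later        */
}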