ivmai / bdwgc, build 1465 (push via travis-ci-com, committed by ivmai)
20 Mar 2023 06:58AM UTC coverage: 76.199% (-0.2%) from 76.448%

Fix description of client promise for IGNORE_OFF_PAGE allocated objects
(documentation)

The client should keep a pointer within the first heap block for such
an allocated object.  Previously, it was mentioned in the documentation
and comments that such a pointer should be within the first 256 or 512
bytes.

* README.md (The C Interface to the Allocator): Fix the description
(comment) of GC_malloc_ignore_off_page (the client promises to keep
a pointer within the first hblk of the object instead of 256 or 512
bytes, or a page).
* docs/gcinterface.md (GC_MALLOC_IGNORE_OFF_PAGE): Likewise.
* gc.man (GC_malloc_atomic_ignore_off_page): Likewise.
* include/gc/gc.h (GC_malloc_ignore_off_page): Likewise.
* include/gc/gc_mark.h (GC_generic_malloc_ignore_off_page): Likewise.
* include/private/gc_priv.h (IGNORE_OFF_PAGE): Likewise.
* include/private/gc_priv.h [DBG_HDRS_ALL || GC_GCJ_SUPPORT
|| !GC_NO_FINALIZATION] (GC_generic_malloc_inner_ignore_off_page):
Likewise.
* malloc.c [DBG_HDRS_ALL || GC_GCJ_SUPPORT || !GC_NO_FINALIZATION]
(GC_generic_malloc_inner_ignore_off_page): Likewise.
* include/gc/gc_gcj.h (GC_gcj_malloc_ignore_off_page): Refine comment.
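
To make the corrected promise concrete, here is a minimal usage sketch (not taken from the bdwgc sources; it assumes gc/gc.h and the documented GC_MALLOC_IGNORE_OFF_PAGE macro, and the structure and size below are invented for illustration):

#include <gc/gc.h>

/* For an object allocated with GC_MALLOC_IGNORE_OFF_PAGE, the client       */
/* promises to keep a pointer within the first heap block of the object;    */
/* pointers only to locations far past the beginning do not guarantee that  */
/* the object stays reachable.                                               */
struct big_buffer {
    size_t used;
    char data[1 << 20];   /* illustrative size only */
};

static struct big_buffer *make_buffer(void)
{
    struct big_buffer *b = (struct big_buffer *)
        GC_MALLOC_IGNORE_OFF_PAGE(sizeof(struct big_buffer));

    if (b != NULL)
        b->used = 0;
    return b;   /* the caller must keep this base pointer reachable */
}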

7751 of 10172 relevant lines covered (76.2%)

8682664.47 hits per line

Source file: /mallocx.c (78.63% of lines covered)

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2009-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <string.h>

#ifndef MSWINCE
# include <errno.h>
#endif

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
#include "private/gc_alloc_ptrs.h"
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef GC_ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

GC_API int GC_CALL GC_get_kind_and_size(const void * p, size_t * psize)
{
    hdr * hhdr = HDR(p);

    if (psize != NULL) {
        *psize = (size_t)hhdr->hb_sz;
    }
    return hhdr -> hb_obj_kind;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_or_special_malloc(size_t lb,
                                                                  int knd)
{
    switch(knd) {
        case PTRFREE:
        case NORMAL:
            return GC_malloc_kind(lb, knd);
        case UNCOLLECTABLE:
#       ifdef GC_ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
#       endif
            return GC_generic_malloc_uncollectable(lb, knd);
        default:
            return GC_generic_malloc(lb, knd);
    }
}
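
/* Illustrative sketch (not part of mallocx.c): how client code might query  */
/* the kind and size of an object via the entry point defined above.  It     */
/* assumes GC_get_kind_and_size() is exposed to clients (it appears to be    */
/* declared in gc/gc_mark.h); the function and variable names are made up.   */
#include <stdio.h>
#include <gc/gc.h>
#include <gc/gc_mark.h>

static void describe_object(const void *obj)
{
    size_t sz = 0;
    int kind = GC_get_kind_and_size(obj, &sz);

    /* kind is the collector's internal object-kind index (pointer-free,    */
    /* normal, uncollectable, ...); sz is the size of the containing object.*/
    printf("object kind=%d, size=%lu bytes\n", kind, (unsigned long)sz);
}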

/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    void * result;
#   if defined(_FORTIFY_SOURCE) && defined(__GNUC__) && !defined(__clang__)
      volatile  /* Use cleared_p instead of p as a workaround to avoid  */
                /* passing alloc_size(lb) attribute associated with p   */
                /* to memset (including memset call inside GC_free).    */
#   endif
      word cleared_p = (word)p;
    size_t sz;      /* Current size in bytes    */
    size_t orig_sz; /* Original sz in bytes     */
    int obj_kind;

    if (NULL == p) return GC_malloc(lb);  /* Required by ANSI */
    if (0 == lb) /* and p != NULL */ {
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
      return NULL;
    }
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = (size_t)hhdr->hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        word descr = GC_obj_kinds[obj_kind].ok_descriptor;

        sz = (sz + HBLKSIZE-1) & ~(HBLKSIZE-1);
        if (GC_obj_kinds[obj_kind].ok_relocate_descr)
          descr += sz;
        /* GC_realloc might be changing the block size while            */
        /* GC_reclaim_block or GC_clear_hdr_marks is examining it.      */
        /* The change to the size field is benign, in that GC_reclaim   */
        /* (and GC_clear_hdr_marks) would work correctly with either    */
        /* value, since we are not changing the number of objects in    */
        /* the block.  But seeing a half-updated value (though unlikely */
        /* to occur in practice) would probably be bad.                 */
        /* Using unordered atomic accesses on the size and hb_descr     */
        /* fields would solve the issue.  (The alternate solution might */
        /* be to initially overallocate large objects, so we do not     */
        /* have to adjust the size in GC_realloc, if they still fit.    */
        /* But that is probably more expensive, since we may end up     */
        /* scanning a bunch of zeros during GC.)                        */
#       ifdef AO_HAVE_store
          GC_STATIC_ASSERT(sizeof(hhdr->hb_sz) == sizeof(AO_t));
          AO_store((volatile AO_t *)&hhdr->hb_sz, (AO_t)sz);
          AO_store((volatile AO_t *)&hhdr->hb_descr, (AO_t)descr);
#       else
          {
            LOCK();
            hhdr -> hb_sz = sz;
            hhdr -> hb_descr = descr;
            UNLOCK();
          }
#       endif

#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         endif
#         ifdef MARK_BIT_PER_GRANULE
            GC_ASSERT((hhdr -> hb_flags & LARGE_BLOCK) != 0
                        && hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
                BZERO((ptr_t)cleared_p + lb, orig_sz - lb);
            }
            return p;
        }
        /* shrink */
        sz = lb;
    }
    result = GC_generic_or_special_malloc((word)lb, obj_kind);
    if (EXPECT(result != NULL, TRUE)) {
      /* In case of shrink, it could also return original object.       */
      /* But this gives the client warning of imminent disaster.        */
      BCOPY(p, result, sz);
#     ifndef IGNORE_FREE
        GC_free((ptr_t)cleared_p);
#     endif
    }
    return result;
}
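
/* Illustrative sketch (not part of mallocx.c): growing a collector-managed  */
/* array through the documented GC_REALLOC() macro, which resolves to the    */
/* GC_realloc() entry point above (or to its debug variant).  As noted in    */
/* the comment above, the object may move, so callers must use the returned  */
/* pointer.  The helper name below is made up.                               */
#include <gc/gc.h>

static int *grow_int_array(int *arr, size_t new_count)
{
    /* GC_REALLOC(NULL, n) behaves like GC_MALLOC(n); the reallocated object */
    /* keeps the kind (normal, atomic, uncollectable) of the original one.   */
    return (int *)GC_REALLOC(arr, new_count * sizeof(int));
}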

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.        */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_EXTRAS)

# if !defined(REDIRECT_MALLOC_IN_HEADER)
    void * realloc(void * p, size_t lb)
    {
      return REDIRECT_REALLOC(p, lb);
    }
# endif

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding allocation lock while we clear the memory.  */
GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lg;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;

    if (SMALL_OBJ(lb))
        return GC_generic_malloc(lb, k);
    GC_ASSERT(k < MAXOBJKINDS);
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE, 0);
    if (EXPECT(NULL == result, FALSE)) {
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb);
    }

    if (GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    } else {
#       ifdef THREADS
            /* Clear any memory that might be used for GC descriptors   */
            /* before we release the lock.                              */
            ((word *)result)[0] = 0;
            ((word *)result)[1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
            ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#       endif
    }
    GC_bytes_allocd += lb_rounded;
    UNLOCK();
    if (init && !GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, NORMAL);
}

GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_ignore_off_page(lb, PTRFREE);
}

/* Increment GC_bytes_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                        */
void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.                         */
void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void)
{
    return (size_t)GC_bytes_freed;
}

# ifdef PARALLEL_MARK
    STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    struct hblk ** rlh;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    /* Currently a single object is always allocated if manual VDB. */
    /* TODO: GC_dirty should be called for each linked object (but  */
    /* the last one) to support multiple objects allocation.        */
    if (!SMALL_OBJ(lb) || GC_manual_vdb) {
        op = GC_generic_malloc(lb, k);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
#       ifndef GC_DISABLE_INCREMENTAL
          if (GC_manual_vdb && GC_is_heap_ptr(result)) {
            GC_dirty_inner(result);
            REACHABLE_AFTER_DIRTY(op);
          }
#       endif
        return;
    }
    GC_ASSERT(k < MAXOBJKINDS);
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    rlh = ok -> ok_reclaim_list;
    if (rlh != NULL) {
        struct hblk * hbp;
        hdr * hhdr;

        while ((hbp = rlh[lg]) != NULL) {
            hhdr = HDR(hbp);
            rlh[lg] = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                  signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                  GC_ASSERT(my_bytes_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.    */
                  /* Thus we can't accidentally adjust it down in more  */
                  /* than one thread simultaneously.                    */

                  if (my_bytes_allocd_tmp != 0) {
                    (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                           (AO_t)(-my_bytes_allocd_tmp));
                    GC_bytes_allocd += my_bytes_allocd_tmp;
                  }
                  GC_acquire_mark_lock();
                  ++ GC_fl_builder_count;
                  UNLOCK();
                  GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
#                 ifdef THREAD_SANITIZER
                    GC_release_mark_lock();
                    LOCK();
                    GC_bytes_found += my_bytes_allocd;
                    UNLOCK();
#                 else
                    GC_bytes_found += my_bytes_allocd;
                                        /* The result may be inaccurate. */
                    GC_release_mark_lock();
#                 endif
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              /* We also reclaimed memory, so we need to adjust       */
              /* that count.                                          */
              GC_bytes_found += my_bytes_allocd;
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* The GC lock is needed for reclaim list access.  We   */
                /* must decrement fl_builder_count before reacquiring   */
                /* the lock.  Hopefully this path is rare.              */

                rlh = ok -> ok_reclaim_list; /* reload rlh after locking */
                if (NULL == rlh) break;
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.      */
    /* We don't refill it, but we need to use it up before allocating   */
    /* a new block ourselves.                                           */
      opp = &(GC_obj_kinds[k].ok_freelist[lg]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if ((word)my_bytes_allocd >= HBLKSIZE) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.  */
    {
        struct hblk *h = GC_allochblk(lb, k, 0 /* flags */, 0 /* align_m1 */);

        if (h /* != NULL */) { /* CPPCHECK */
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#         ifdef PARALLEL_MARK
            if (GC_parallel) {
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();

              op = GC_build_fl(h, lw,
                        (ok -> ok_init || GC_debugging_started), 0);

              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
            }
#         endif
          op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
          goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that    */
    /* this may trigger a collection or expand the heap.                */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;

    /* Add EXTRA_BYTES and round up to a multiple of a granule. */
    lb = SIZET_SAT_ADD(lb, EXTRA_BYTES + GRANULE_BYTES - 1)
            & ~(GRANULE_BYTES - 1);

    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}
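
/* Illustrative sketch (not part of mallocx.c): the usage pattern described  */
/* in the comment above GC_generic_malloc_many(), where a thread keeps a     */
/* private free list and refills it with a single call.  This assumes the    */
/* GC_malloc_many() and GC_NEXT() declarations in gc/gc.h; the node type and */
/* the list variable are made up for the example.                            */
#include <gc/gc.h>

struct node { struct node *next; int value; };

static void *local_free_list = NULL;  /* static, hence scanned as a root */

static struct node *alloc_node(void)
{
    void *p = local_free_list;

    if (NULL == p) {
        /* One call (one lock acquisition) replenishes roughly a block's    */
        /* worth of objects, linked through their first word.               */
        p = GC_malloc_many(sizeof(struct node));
        if (NULL == p) return NULL;   /* out of memory */
    }
    local_free_list = GC_NEXT(p);     /* unlink the first object */
    GC_NEXT(p) = NULL;                /* clear the link field, as advised above */
    return (struct node *)p;
}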

/* TODO: The debugging version of GC_memalign and friends is tricky     */
/* and currently missing.  There are 2 major difficulties:              */
/* - GC_base() should always point to the beginning of the allocated    */
/* block (thus, for small objects allocation we should probably         */
/* iterate over the list of free objects to find the one with the       */
/* suitable alignment);                                                 */
/* - store_debug_info() should return the pointer of the object with    */
/* the requested alignment (unlike the object header).                  */

GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t offset;
    ptr_t result;
    size_t align_m1 = align - 1;

    /* Check the alignment argument.    */
    if (align < sizeof(void *) || (align & align_m1) != 0) return NULL;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);

    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
      return GC_clear_stack(GC_generic_malloc_aligned(lb, NORMAL, align_m1));
    }

    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    /* TODO: Not space efficient for big align values. */
    result = (ptr_t)GC_malloc(SIZET_SAT_ADD(lb, align_m1));
            /* It is OK not to check result for NULL as in that case    */
            /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (size_t)(word)result & align_m1;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            GC_STATIC_ASSERT(VALID_OFFSET_SZ <= HBLKSIZE);
            GC_ASSERT(offset < VALID_OFFSET_SZ);
            GC_register_displacement(offset);
        }
        result += offset;
    }
    GC_ASSERT(((word)result & align_m1) == 0);
    return result;
}

/* This one exists largely to redirect posix_memalign for leak finding.  */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
  /* Check alignment properly.  */
  size_t align_minus_one = align - 1; /* to workaround a cppcheck warning */
  if (align < sizeof(void *) || (align_minus_one & align) != 0) {
#   ifdef MSWINCE
      return ERROR_INVALID_PARAMETER;
#   else
      return EINVAL;
#   endif
  }

  if ((*memptr = GC_memalign(align, lb)) == NULL) {
#   ifdef MSWINCE
      return ERROR_NOT_ENOUGH_MEMORY;
#   else
      return ENOMEM;
#   endif
  }
  return 0;
}
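
/* Illustrative sketch (not part of mallocx.c): requesting aligned storage   */
/* from the collector.  It assumes the GC_posix_memalign() declaration in    */
/* gc/gc.h; the 64-byte alignment and helper name are arbitrary examples.    */
#include <gc/gc.h>

static void *alloc_cacheline_aligned(size_t size)
{
    void *p = NULL;

    /* Like posix_memalign(): returns 0 on success, EINVAL for a bad        */
    /* alignment (not a power of two, or smaller than sizeof(void *)), or   */
    /* ENOMEM when the allocation fails.                                     */
    if (GC_posix_memalign(&p, 64, size) != 0)
        return NULL;
    return p;
}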

#ifndef GC_NO_VALLOC
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_valloc(size_t lb)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_real_page_size != 0);
    return GC_memalign(GC_real_page_size, lb);
  }

  GC_API GC_ATTR_MALLOC void * GC_CALL GC_pvalloc(size_t lb)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_real_page_size != 0);
    lb = SIZET_SAT_ADD(lb, GC_real_page_size - 1) & ~(GC_real_page_size - 1);
    return GC_memalign(GC_real_page_size, lb);
  }
#endif /* !GC_NO_VALLOC */

/* Provide a version of strdup() that uses the collector to allocate    */
/* the copy of the string.                                              */
GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *s)
{
  char *copy;
  size_t lb;
  if (s == NULL) return NULL;
  lb = strlen(s) + 1;
  copy = (char *)GC_malloc_atomic(lb);
  if (EXPECT(NULL == copy, FALSE)) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}

GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str); /* str is expected to be non-NULL  */
  if (EXPECT(len > size, FALSE))
    len = size;
  copy = (char *)GC_malloc_atomic(len + 1);
  if (EXPECT(NULL == copy, FALSE)) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  if (EXPECT(len > 0, TRUE))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}

#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API GC_ATTR_MALLOC wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = (wchar_t *)GC_malloc_atomic(lb);

    if (EXPECT(NULL == copy, FALSE)) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */

#ifndef CPPCHECK
  GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
  {
    return GC_malloc(lb);
  }

  GC_API void GC_CALL GC_change_stubborn(const void *p)
  {
    UNUSED_ARG(p);
  }
#endif /* !CPPCHECK */

GC_API void GC_CALL GC_end_stubborn_change(const void *p)
{
  GC_dirty(p); /* entire object */
}

GC_API void GC_CALL GC_ptr_store_and_dirty(void *p, const void *q)
{
  *(const void **)p = q;
  GC_dirty(p);
  REACHABLE_AFTER_DIRTY(q);
}
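
The last two entry points matter mostly for incremental or manual-VDB configurations, where the collector has to be told that a heap object was written to. A minimal usage sketch, assuming the GC_MALLOC and GC_ptr_store_and_dirty declarations in gc/gc.h (the list type below is made up for illustration):

#include <gc/gc.h>

struct cell { struct cell *next; int value; };

/* Prepend a new cell.  GC_ptr_store_and_dirty() performs the pointer store  */
/* and the dirty notification in one call, which is what incremental and     */
/* manual-VDB builds need after updating a heap object.                      */
static struct cell *prepend(struct cell *head, int value)
{
    struct cell *c = (struct cell *)GC_MALLOC(sizeof(struct cell));

    if (NULL == c) return head;
    c->value = value;
    GC_ptr_store_and_dirty(&c->next, head);
    /* Alternatively, after a series of plain stores into *c, the client     */
    /* could call GC_end_stubborn_change(c) to mark the whole object dirty.  */
    return c;
}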