
ivmai / bdwgc, build 1484 (push via travis-ci-com, committed by ivmai)
12 Apr 2023 08:54AM UTC, coverage: 76.502% (+0.07%) from 76.429%

Do not add extra byte to large ignore-off-page objects

For ignore-off-page objects the client should guarantee that a pointer is
kept within the first heap block of the object, thus there is no need to add
an extra byte for such objects if the object size is at least one heap block.

* allchblk.c (setup_header): Add assertion that byte_sz is not less
than ALIGNMENT.
* allchblk.c [ALIGNMENT>GC_DS_TAGS] (setup_header): Modify descr local
variable to make it zero if IGNORE_OFF_PAGE flag is set and kind is
NORMAL (and object size is not less than HBLKSIZE); add comment.
* mallocx.c [ALIGNMENT>GC_DS_TAGS] (GC_realloc): Likewise.
* include/gc/gc.h (GC_all_interior_pointers): Update comment.
* include/private/gc_priv.h [MAX_EXTRA_BYTES==0] (ADD_EXTRA_BYTES):
Define as no-op.
* malloc.c (GC_generic_malloc_inner): Define lb_adjusted local
variable; pass lb_adjusted to GC_alloc_large_and_clear().
* malloc.c [MAX_EXTRA_BYTES>0] (GC_generic_malloc_inner): Set
lb_adjusted to lb if IGNORE_OFF_PAGE flag is set and lb is not less
than HBLKSIZE.
* malloc.c [MAX_EXTRA_BYTES>0] (GC_generic_malloc_aligned): Set
lb_rounded without EXTRA_BYTES added (and compute lg based on
lb_rounded) if IGNORE_OFF_PAGE is set and lb is not less than HBLKSIZE.
* mallocx.c (GC_realloc): Define ok local variable.
* typd_mlc.c (GC_malloc_explicitly_typed_ignore_off_page): Remove
lb_adjusted local variable; call GC_malloc_explicitly_typed() if
lb is smaller than HBLKSIZE-sizeof(word), otherwise pass lb plus
sizeof(word) (instead of lb plus TYPD_EXTRA_BYTES) to
GC_generic_malloc_aligned; add comment.
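
For context, the ignore-off-page allocation entry points touched by this
change (GC_malloc_ignore_off_page and friends) require the caller to keep a
pointer to somewhere within the first heap block of the object while it is
live; that contract is what makes the extra byte unnecessary once such an
object spans at least one heap block.  A minimal usage sketch, assuming a
standard bdwgc installation with <gc.h> on the include path (the size and the
stored field are illustrative only):

#include <gc.h>
#include <stdio.h>

int main(void)
{
    GC_INIT();
    /* A large pointer-containing object: only pointers near its start   */
    /* are honored by the collector, so the caller must hold a pointer   */
    /* to (or near) the beginning of the object while it is in use.      */
    void **big = (void **)GC_MALLOC_IGNORE_OFF_PAGE(1024 * 1024);

    if (NULL == big) return 1;
    big[0] = GC_MALLOC(64);   /* the object's contents are still scanned */
    printf("allocated %p\n", (void *)big);
    return 0;
}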

24 of 24 new or added lines in 4 files covered. (100.0%)

7765 of 10150 relevant lines covered (76.5%)

8458785.63 hits per line

Source File: /mallocx.c (89.58% covered)
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2009-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <string.h>

#ifndef MSWINCE
# include <errno.h>
#endif

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
#include "private/gc_alloc_ptrs.h"
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef GC_ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

GC_API int GC_CALL GC_get_kind_and_size(const void * p, size_t * psize)
{
    hdr * hhdr = HDR(p);

    if (psize != NULL) {
        *psize = (size_t)hhdr->hb_sz;
    }
    return hhdr -> hb_obj_kind;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_or_special_malloc(size_t lb,
                                                                  int k)
{
    switch (k) {
        case PTRFREE:
        case NORMAL:
            return GC_malloc_kind(lb, k);
        case UNCOLLECTABLE:
#       ifdef GC_ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
#       endif
            return GC_generic_malloc_uncollectable(lb, k);
        default:
            return GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0);
    }
}

/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    void * result;
#   if defined(_FORTIFY_SOURCE) && defined(__GNUC__) && !defined(__clang__)
      volatile  /* Use cleared_p instead of p as a workaround to avoid  */
                /* passing alloc_size(lb) attribute associated with p   */
                /* to memset (including memset call inside GC_free).    */
#   endif
      word cleared_p = (word)p;
    size_t sz;      /* Current size in bytes    */
    size_t orig_sz; /* Original sz in bytes     */
    int obj_kind;

    if (NULL == p) return GC_malloc(lb);  /* Required by ANSI */
    if (0 == lb) /* and p != NULL */ {
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
      return NULL;
    }
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = (size_t)hhdr->hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        struct obj_kind * ok = &GC_obj_kinds[obj_kind];
        word descr = ok -> ok_descriptor;

        /* Round it up to the next whole heap block.    */
        sz = (sz + HBLKSIZE-1) & ~(HBLKSIZE-1);
#       if ALIGNMENT > GC_DS_TAGS
          /* An extra byte is not added in case of ignore-off-page  */
          /* allocated objects not smaller than HBLKSIZE.           */
          GC_ASSERT(sz >= HBLKSIZE);
          if (EXTRA_BYTES != 0 && (hhdr -> hb_flags & IGNORE_OFF_PAGE) != 0
              && obj_kind == NORMAL)
            descr += ALIGNMENT; /* or set to 0 */
#       endif
        if (ok -> ok_relocate_descr)
          descr += sz;
        /* GC_realloc might be changing the block size while            */
        /* GC_reclaim_block or GC_clear_hdr_marks is examining it.      */
        /* The change to the size field is benign, in that GC_reclaim   */
        /* (and GC_clear_hdr_marks) would work correctly with either    */
        /* value, since we are not changing the number of objects in    */
        /* the block.  But seeing a half-updated value (though unlikely */
        /* to occur in practice) could be probably bad.                 */
        /* Using unordered atomic accesses on the size and hb_descr     */
        /* fields would solve the issue.  (The alternate solution might */
        /* be to initially overallocate large objects, so we do not     */
        /* have to adjust the size in GC_realloc, if they still fit.    */
        /* But that is probably more expensive, since we may end up     */
        /* scanning a bunch of zeros during GC.)                        */
#       ifdef AO_HAVE_store
          GC_STATIC_ASSERT(sizeof(hhdr->hb_sz) == sizeof(AO_t));
          AO_store((volatile AO_t *)&hhdr->hb_sz, (AO_t)sz);
          AO_store((volatile AO_t *)&hhdr->hb_descr, (AO_t)descr);
#       else
          {
            LOCK();
            hhdr -> hb_sz = sz;
            hhdr -> hb_descr = descr;
            UNLOCK();
          }
#       endif

#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         endif
#         ifdef MARK_BIT_PER_GRANULE
            GC_ASSERT((hhdr -> hb_flags & LARGE_BLOCK) != 0
                        && hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_EXTRA_BYTES(lb) <= sz) {
        if (lb >= (sz >> 1)) {
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
                BZERO((ptr_t)cleared_p + lb, orig_sz - lb);
            }
            return p;
        }
        /* shrink */
        sz = lb;
    }
    result = GC_generic_or_special_malloc((word)lb, obj_kind);
    if (EXPECT(result != NULL, TRUE)) {
      /* In case of shrink, it could also return original object.       */
      /* But this gives the client warning of imminent disaster.        */
      BCOPY(p, result, sz);
#     ifndef IGNORE_FREE
        GC_free((ptr_t)cleared_p);
#     endif
    }
    return result;
}
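
/* Illustrative usage sketch (not part of mallocx.c): per the contract   */
/* stated above, GC_realloc() is used like realloc() except that the old */
/* block is collector-managed, so the caller never frees it explicitly.  */
/* This assumes the public GC_REALLOC() macro from gc.h; grow() and the  */
/* element type are hypothetical.                                        */
static int *grow(int *buf, size_t new_n)
{
    /* The returned pointer may differ from buf; the old block is        */
    /* reclaimed by the collector (or freed inside GC_realloc itself).   */
    return (int *)GC_REALLOC(buf, new_n * sizeof(int));
}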

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.        */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_EXTRAS)

# if !defined(REDIRECT_MALLOC_IN_HEADER)
    void * realloc(void * p, size_t lb)
    {
      return REDIRECT_REALLOC(p, lb);
    }
# endif

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding allocation lock while we clear the memory.  */
GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
  return GC_generic_malloc_aligned(lb, k, IGNORE_OFF_PAGE, 0 /* align_m1 */);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_aligned(lb, NORMAL, IGNORE_OFF_PAGE, 0);
}

GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_aligned(lb, PTRFREE, IGNORE_OFF_PAGE, 0);
}

/* Increment GC_bytes_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                        */
void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.                         */
void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void)
{
    return (size_t)GC_bytes_freed;
}

# ifdef PARALLEL_MARK
    STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok;
    struct hblk ** rlh;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    /* Currently a single object is always allocated if manual VDB. */
    /* TODO: GC_dirty should be called for each linked object (but  */
    /* the last one) to support multiple objects allocation.        */
    if (!SMALL_OBJ(lb) || GC_manual_vdb) {
        op = GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
#       ifndef GC_DISABLE_INCREMENTAL
          if (GC_manual_vdb && GC_is_heap_ptr(result)) {
            GC_dirty_inner(result);
            REACHABLE_AFTER_DIRTY(op);
          }
#       endif
        return;
    }
    GC_ASSERT(k < MAXOBJKINDS);
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    ok = &GC_obj_kinds[k];
    rlh = ok -> ok_reclaim_list;
    if (rlh != NULL) {
        struct hblk * hbp;
        hdr * hhdr;

        while ((hbp = rlh[lg]) != NULL) {
            hhdr = HDR(hbp);
            rlh[lg] = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                  signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                  GC_ASSERT(my_bytes_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.    */
                  /* Thus we can't accidentally adjust it down in more  */
                  /* than one thread simultaneously.                    */

                  if (my_bytes_allocd_tmp != 0) {
                    (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                           (AO_t)(-my_bytes_allocd_tmp));
                    GC_bytes_allocd += my_bytes_allocd_tmp;
                  }
                  GC_acquire_mark_lock();
                  ++ GC_fl_builder_count;
                  UNLOCK();
                  GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
#                 ifdef THREAD_SANITIZER
                    GC_release_mark_lock();
                    LOCK();
                    GC_bytes_found += my_bytes_allocd;
                    UNLOCK();
#                 else
                    GC_bytes_found += my_bytes_allocd;
                                        /* The result may be inaccurate. */
                    GC_release_mark_lock();
#                 endif
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              /* We also reclaimed memory, so we need to adjust       */
              /* that count.                                          */
              GC_bytes_found += my_bytes_allocd;
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* The GC lock is needed for reclaim list access.  We   */
                /* must decrement fl_builder_count before reacquiring   */
                /* the lock.  Hopefully this path is rare.              */

                rlh = ok -> ok_reclaim_list; /* reload rlh after locking */
                if (NULL == rlh) break;
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.      */
    /* We don't refill it, but we need to use it up before allocating   */
    /* a new block ourselves.                                           */
      opp = &(ok -> ok_freelist[lg]);
      if ((op = *opp) != NULL) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if ((word)my_bytes_allocd >= HBLKSIZE) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.  */
    {
        struct hblk *h = GC_allochblk(lb, k, 0 /* flags */, 0 /* align_m1 */);

        if (h /* != NULL */) { /* CPPCHECK */
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#         ifdef PARALLEL_MARK
            if (GC_parallel) {
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();

              op = GC_build_fl(h, lw,
                        (ok -> ok_init || GC_debugging_started), 0);

              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
            }
#         endif
          op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
          goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that    */
    /* this may trigger a collection or expand the heap.                */
      op = GC_generic_malloc_inner(lb, k, 0 /* flags */);
      if (op != NULL) obj_link(op) = NULL;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;
    size_t lg = ALLOC_REQUEST_GRANS(lb);

    GC_generic_malloc_many(GRANULES_TO_BYTES(lg), NORMAL, &result);
    return result;
}
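
/* Illustrative usage sketch (not part of mallocx.c): the pattern the    */
/* comment above GC_generic_malloc_many describes.  A client caches      */
/* same-size objects, refills the cache via the public GC_malloc_many()  */
/* (declared in gc.h), walks the returned list with GC_NEXT(), and       */
/* clears the link word before handing an object out.  A real client     */
/* would keep the cache in thread-local storage; the plain static and    */
/* the 64-byte size here are only for brevity.                           */
static void *cache_64 = NULL;           /* hypothetical object cache      */

static void *alloc_64(void)
{
    void *p = cache_64;

    if (NULL == p) {
        p = GC_malloc_many(64);         /* one lock acquisition refills   */
        if (NULL == p) return NULL;     /* the whole cache                */
    }
    cache_64 = GC_NEXT(p);              /* unlink the head of the list    */
    GC_NEXT(p) = NULL;                  /* clear the link field           */
    return p;
}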

/* TODO: The debugging version of GC_memalign and friends is tricky     */
/* and currently missing.  There are 2 major difficulties:              */
/* - GC_base() should always point to the beginning of the allocated    */
/* block (thus, for small objects allocation we should probably         */
/* iterate over the list of free objects to find the one with the       */
/* suitable alignment);                                                 */
/* - store_debug_info() should return the pointer of the object with    */
/* the requested alignment (unlike the object header).                  */

GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t offset;
    ptr_t result;
    size_t align_m1 = align - 1;

    /* Check the alignment argument.    */
    if (align < sizeof(void *) || (align & align_m1) != 0) return NULL;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);

    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
      return GC_clear_stack(GC_generic_malloc_aligned(lb, NORMAL,
                                        0 /* flags */, align_m1));
    }

    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    /* TODO: Not space efficient for big align values. */
    result = (ptr_t)GC_malloc(SIZET_SAT_ADD(lb, align_m1));
            /* It is OK not to check result for NULL as in that case    */
            /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (size_t)(word)result & align_m1;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            GC_STATIC_ASSERT(VALID_OFFSET_SZ <= HBLKSIZE);
            GC_ASSERT(offset < VALID_OFFSET_SZ);
            GC_register_displacement(offset);
        }
        result += offset;
    }
    GC_ASSERT(((word)result & align_m1) == 0);
    return result;
}

/* This one exists largely to redirect posix_memalign for leaks finding. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
  /* Check alignment properly.  */
  size_t align_minus_one = align - 1; /* to workaround a cppcheck warning */
  if (align < sizeof(void *) || (align_minus_one & align) != 0) {
#   ifdef MSWINCE
      return ERROR_INVALID_PARAMETER;
#   else
      return EINVAL;
#   endif
  }

  if ((*memptr = GC_memalign(align, lb)) == NULL) {
#   ifdef MSWINCE
      return ERROR_NOT_ENOUGH_MEMORY;
#   else
      return ENOMEM;
#   endif
  }
  return 0;
}
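
/* Illustrative usage sketch (not part of mallocx.c): GC_posix_memalign()*/
/* mirrors posix_memalign() but returns collector-managed memory; as     */
/* coded above it yields 0 on success and EINVAL/ENOMEM on failure.      */
/* The alignment and size are arbitrary example values.                  */
static void *aligned_example(void)
{
    void *p;

    if (GC_posix_memalign(&p, 4096, 10000) != 0) return NULL;
    return p;   /* p is 4096-byte aligned and garbage collected */
}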

#ifndef GC_NO_VALLOC
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_valloc(size_t lb)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_real_page_size != 0);
    return GC_memalign(GC_real_page_size, lb);
  }

  GC_API GC_ATTR_MALLOC void * GC_CALL GC_pvalloc(size_t lb)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_real_page_size != 0);
    lb = SIZET_SAT_ADD(lb, GC_real_page_size - 1) & ~(GC_real_page_size - 1);
    return GC_memalign(GC_real_page_size, lb);
  }
#endif /* !GC_NO_VALLOC */

/* Provide a version of strdup() that uses the collector to allocate    */
/* the copy of the string.                                              */
GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *s)
{
  char *copy;
  size_t lb;
  if (s == NULL) return NULL;
  lb = strlen(s) + 1;
  copy = (char *)GC_malloc_atomic(lb);
  if (EXPECT(NULL == copy, FALSE)) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}

GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str); /* str is expected to be non-NULL  */
  if (EXPECT(len > size, FALSE))
    len = size;
  copy = (char *)GC_malloc_atomic(len + 1);
  if (EXPECT(NULL == copy, FALSE)) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  if (EXPECT(len > 0, TRUE))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}

#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API GC_ATTR_MALLOC wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = (wchar_t *)GC_malloc_atomic(lb);

    if (EXPECT(NULL == copy, FALSE)) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */

#ifndef CPPCHECK
  GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
  {
    return GC_malloc(lb);
  }

  GC_API void GC_CALL GC_change_stubborn(const void *p)
  {
    UNUSED_ARG(p);
  }
#endif /* !CPPCHECK */

GC_API void GC_CALL GC_end_stubborn_change(const void *p)
{
  GC_dirty(p); /* entire object */
}

GC_API void GC_CALL GC_ptr_store_and_dirty(void *p, const void *q)
{
  *(const void **)p = q;
  GC_dirty(p);
  REACHABLE_AFTER_DIRTY(q);
}