
ivmai / bdwgc / 1483

10 Apr 2023 09:28PM UTC · coverage: 76.429% (-0.02%) from 76.447%
Build 1483 · push · travis-ci-com · ivmai

Fix double lock in GC_init_finalized_malloc
(fix of commit 05ee810c3)

* fnlz_mlc.c [ENABLE_DISCLAIM] (GC_register_disclaim_proc_inner): New
STATIC function (move most code from GC_register_disclaim_proc except
for LOCK/UNLOCK).
* fnlz_mlc.c [ENABLE_DISCLAIM] (GC_init_finalized_malloc): Call
GC_register_disclaim_proc_inner instead of GC_register_disclaim_proc.
* fnlz_mlc.c [ENABLE_DISCLAIM] (GC_register_disclaim_proc): Call
GC_register_disclaim_proc_inner().
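A minimal, self-contained sketch of the locking pattern applied by this change (hypothetical names, with a plain pthread mutex standing in for the collector's LOCK/UNLOCK; the actual code is in fnlz_mlc.c): the registration work moves into an inner helper that assumes the lock is already held, the public entry point takes the lock around that helper, and initialization code that already holds the lock calls the helper directly instead of locking twice.

#include <pthread.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered_kind = -1;

/* Inner helper: does the actual work; the caller must hold alloc_lock. */
static void register_proc_inner(int kind)
{
  registered_kind = kind;  /* the real code also records proc and flags */
}

/* Public entry point: the one place that acquires the lock for this job. */
void register_proc(int kind)
{
  pthread_mutex_lock(&alloc_lock);
  register_proc_inner(kind);
  pthread_mutex_unlock(&alloc_lock);
}

/* Initialization path that already holds the lock: calling register_proc */
/* here would lock twice (the bug being fixed), so it uses the inner      */
/* helper directly.                                                       */
void init_finalized_malloc(void)
{
  pthread_mutex_lock(&alloc_lock);
  /* ... other setup done under the lock ... */
  register_proc_inner(0);
  pthread_mutex_unlock(&alloc_lock);
}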

7 of 7 new or added lines in 1 file covered. (100.0%)

7740 of 10127 relevant lines covered (76.43%)

8999452.48 hits per line

Source File: /mallocx.c (86.81% covered)
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2009-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <string.h>

#ifndef MSWINCE
# include <errno.h>
#endif

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
#include "private/gc_alloc_ptrs.h"
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef GC_ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

GC_API int GC_CALL GC_get_kind_and_size(const void * p, size_t * psize)
{
    hdr * hhdr = HDR(p);

    if (psize != NULL) {
        *psize = (size_t)hhdr->hb_sz;
    }
    return hhdr -> hb_obj_kind;
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_or_special_malloc(size_t lb,
                                                                  int k)
{
    switch (k) {
        case PTRFREE:
        case NORMAL:
            return GC_malloc_kind(lb, k);
        case UNCOLLECTABLE:
#       ifdef GC_ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
#       endif
            return GC_generic_malloc_uncollectable(lb, k);
        default:
            return GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0);
    }
}

/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    struct hblk * h;
    hdr * hhdr;
    void * result;
#   if defined(_FORTIFY_SOURCE) && defined(__GNUC__) && !defined(__clang__)
      volatile  /* Use cleared_p instead of p as a workaround to avoid  */
                /* passing alloc_size(lb) attribute associated with p   */
                /* to memset (including memset call inside GC_free).    */
#   endif
      word cleared_p = (word)p;
    size_t sz;      /* Current size in bytes    */
    size_t orig_sz; /* Original sz in bytes     */
    int obj_kind;

    if (NULL == p) return GC_malloc(lb);  /* Required by ANSI */
    if (0 == lb) /* and p != NULL */ {
#     ifndef IGNORE_FREE
        GC_free(p);
#     endif
      return NULL;
    }
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = (size_t)hhdr->hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        word descr = GC_obj_kinds[obj_kind].ok_descriptor;

        sz = (sz + HBLKSIZE-1) & ~(HBLKSIZE-1);
        if (GC_obj_kinds[obj_kind].ok_relocate_descr)
          descr += sz;
        /* GC_realloc might be changing the block size while            */
        /* GC_reclaim_block or GC_clear_hdr_marks is examining it.      */
        /* The change to the size field is benign, in that GC_reclaim   */
        /* (and GC_clear_hdr_marks) would work correctly with either    */
        /* value, since we are not changing the number of objects in    */
        /* the block.  But seeing a half-updated value (though unlikely */
        /* to occur in practice) could be probably bad.                 */
        /* Using unordered atomic accesses on the size and hb_descr     */
        /* fields would solve the issue.  (The alternate solution might */
        /* be to initially overallocate large objects, so we do not     */
        /* have to adjust the size in GC_realloc, if they still fit.    */
        /* But that is probably more expensive, since we may end up     */
        /* scanning a bunch of zeros during GC.)                        */
#       ifdef AO_HAVE_store
          GC_STATIC_ASSERT(sizeof(hhdr->hb_sz) == sizeof(AO_t));
          AO_store((volatile AO_t *)&hhdr->hb_sz, (AO_t)sz);
          AO_store((volatile AO_t *)&hhdr->hb_descr, (AO_t)descr);
#       else
          {
            LOCK();
            hhdr -> hb_sz = sz;
            hhdr -> hb_descr = descr;
            UNLOCK();
          }
#       endif

#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         endif
#         ifdef MARK_BIT_PER_GRANULE
            GC_ASSERT((hhdr -> hb_flags & LARGE_BLOCK) != 0
                        && hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_EXTRA_BYTES(lb) <= sz) {
        if (lb >= (sz >> 1)) {
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
                BZERO((ptr_t)cleared_p + lb, orig_sz - lb);
            }
            return p;
        }
        /* shrink */
        sz = lb;
    }
    result = GC_generic_or_special_malloc((word)lb, obj_kind);
    if (EXPECT(result != NULL, TRUE)) {
      /* In case of shrink, it could also return original object.       */
      /* But this gives the client warning of imminent disaster.        */
      BCOPY(p, result, sz);
#     ifndef IGNORE_FREE
        GC_free((ptr_t)cleared_p);
#     endif
    }
    return result;
}
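
/* Illustrative client-side sketch (not part of mallocx.c): growing a    */
/* collector-allocated buffer via GC_REALLOC from the public gc.h API.   */
/* As the comment above GC_realloc notes, the object may move and its    */
/* kind is preserved, so the caller must continue with the returned      */
/* pointer.  The helper name below is hypothetical.                      */
#include "gc.h"

static int *grow_int_buffer(int *buf, size_t new_count)
{
    int *p = (int *)GC_REALLOC(buf, new_count * sizeof(int));

    /* If buf came from GC_MALLOC_ATOMIC, the grown object is still      */
    /* pointer-free; when the data moves, GC_realloc frees the old copy. */
    return p;   /* may differ from buf; NULL on allocation failure */
}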

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.        */
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_EXTRAS)

# if !defined(REDIRECT_MALLOC_IN_HEADER)
    void * realloc(void * p, size_t lb)
    {
      return REDIRECT_REALLOC(p, lb);
    }
# endif

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the          */
/* beginning of the object are considered.                      */
/* We avoid holding allocation lock while we clear the memory.  */
GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
  return GC_generic_malloc_aligned(lb, k, IGNORE_OFF_PAGE, 0 /* align_m1 */);
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_aligned(lb, NORMAL, IGNORE_OFF_PAGE, 0);
}

GC_API GC_ATTR_MALLOC void * GC_CALL
    GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return GC_generic_malloc_aligned(lb, PTRFREE, IGNORE_OFF_PAGE, 0);
}

/* Increment GC_bytes_allocd from code that doesn't have direct access  */
/* to GC_arrays.                                                        */
void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.                         */
void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void)
{
    return (size_t)GC_bytes_freed;
}

# ifdef PARALLEL_MARK
    STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.     */
    size_t lg;      /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok;
    struct hblk ** rlh;

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    /* Currently a single object is always allocated if manual VDB. */
    /* TODO: GC_dirty should be called for each linked object (but  */
    /* the last one) to support multiple objects allocation.        */
    if (!SMALL_OBJ(lb) || GC_manual_vdb) {
        op = GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
        if (EXPECT(0 != op, TRUE))
            obj_link(op) = 0;
        *result = op;
#       ifndef GC_DISABLE_INCREMENTAL
          if (GC_manual_vdb && GC_is_heap_ptr(result)) {
            GC_dirty_inner(result);
            REACHABLE_AFTER_DIRTY(op);
          }
#       endif
        return;
    }
    GC_ASSERT(k < MAXOBJKINDS);
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (EXPECT(get_have_errors(), FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    ok = &GC_obj_kinds[k];
    rlh = ok -> ok_reclaim_list;
    if (rlh != NULL) {
        struct hblk * hbp;
        hdr * hhdr;

        while ((hbp = rlh[lg]) != NULL) {
            hhdr = HDR(hbp);
            rlh[lg] = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                  signed_word my_bytes_allocd_tmp =
                                (signed_word)AO_load(&GC_bytes_allocd_tmp);
                  GC_ASSERT(my_bytes_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.    */
                  /* Thus we can't accidentally adjust it down in more  */
                  /* than one thread simultaneously.                    */

                  if (my_bytes_allocd_tmp != 0) {
                    (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                           (AO_t)(-my_bytes_allocd_tmp));
                    GC_bytes_allocd += my_bytes_allocd_tmp;
                  }
                  GC_acquire_mark_lock();
                  ++ GC_fl_builder_count;
                  UNLOCK();
                  GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  *result = op;
                  (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
                                         (AO_t)my_bytes_allocd);
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
#                 ifdef THREAD_SANITIZER
                    GC_release_mark_lock();
                    LOCK();
                    GC_bytes_found += my_bytes_allocd;
                    UNLOCK();
#                 else
                    GC_bytes_found += my_bytes_allocd;
                                        /* The result may be inaccurate. */
                    GC_release_mark_lock();
#                 endif
                  (void) GC_clear_stack(0);
                  return;
                }
#             endif
              /* We also reclaimed memory, so we need to adjust       */
              /* that count.                                          */
              GC_bytes_found += my_bytes_allocd;
              GC_bytes_allocd += my_bytes_allocd;
              goto out;
            }
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                LOCK();
                /* The GC lock is needed for reclaim list access.  We   */
                /* must decrement fl_builder_count before reacquiring   */
                /* the lock.  Hopefully this path is rare.              */

                rlh = ok -> ok_reclaim_list; /* reload rlh after locking */
                if (NULL == rlh) break;
              }
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.      */
    /* We don't refill it, but we need to use it up before allocating   */
    /* a new block ourselves.                                           */
      opp = &(ok -> ok_freelist[lg]);
      if ((op = *opp) != NULL) {
        *opp = 0;
        my_bytes_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if ((word)my_bytes_allocd >= HBLKSIZE) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_bytes_allocd += my_bytes_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size.  */
    {
        struct hblk *h = GC_allochblk(lb, k, 0 /* flags */, 0 /* align_m1 */);

        if (h /* != NULL */) { /* CPPCHECK */
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#         ifdef PARALLEL_MARK
            if (GC_parallel) {
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();

              op = GC_build_fl(h, lw,
                        (ok -> ok_init || GC_debugging_started), 0);

              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
            }
#         endif
          op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
          goto out;
        }
    }

    /* As a last attempt, try allocating a single object.  Note that    */
    /* this may trigger a collection or expand the heap.                */
      op = GC_generic_malloc_inner(lb, k, 0 /* flags */);
      if (op != NULL) obj_link(op) = NULL;

  out:
    *result = op;
    UNLOCK();
    (void) GC_clear_stack(0);
}

/* Note that the "atomic" version of this would be unsafe, since the    */
/* links would not be seen by the collector.                            */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;
    size_t lg = ALLOC_REQUEST_GRANS(lb);

    GC_generic_malloc_many(GRANULES_TO_BYTES(lg), NORMAL, &result);
    return result;
}
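
/* Illustrative client-side sketch (not part of mallocx.c): the usage    */
/* pattern described above GC_generic_malloc_many, i.e. a thread-local   */
/* free list replenished with GC_malloc_many.  GC_NEXT (from gc.h) reads */
/* the link stored in the first word of each object; the client clears   */
/* it before using the object.  Names here are hypothetical; the free    */
/* list would normally live in thread-local storage.                     */
#include "gc.h"

static void *node_free_list = NULL;     /* ideally one list per thread */

static void *alloc_node(size_t sz)
{
    void *p = node_free_list;

    if (NULL == p) {
        /* Refill the whole list with a single allocation-lock round trip. */
        p = GC_malloc_many(sz);
        if (NULL == p) return NULL;     /* out of memory */
    }
    node_free_list = GC_NEXT(p);
    GC_NEXT(p) = NULL;  /* clear the link field, as recommended above */
    return p;
}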

/* TODO: The debugging version of GC_memalign and friends is tricky     */
/* and currently missing.  There are 2 major difficulties:              */
/* - GC_base() should always point to the beginning of the allocated    */
/* block (thus, for small objects allocation we should probably         */
/* iterate over the list of free objects to find the one with the       */
/* suitable alignment);                                                 */
/* - store_debug_info() should return the pointer of the object with    */
/* the requested alignment (unlike the object header).                  */

GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t offset;
    ptr_t result;
    size_t align_m1 = align - 1;

    /* Check the alignment argument.    */
    if (align < sizeof(void *) || (align & align_m1) != 0) return NULL;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);

    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
      return GC_clear_stack(GC_generic_malloc_aligned(lb, NORMAL,
                                        0 /* flags */, align_m1));
    }

    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    /* TODO: Not space efficient for big align values. */
    result = (ptr_t)GC_malloc(SIZET_SAT_ADD(lb, align_m1));
            /* It is OK not to check result for NULL as in that case    */
            /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */
    offset = (size_t)(word)result & align_m1;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            GC_STATIC_ASSERT(VALID_OFFSET_SZ <= HBLKSIZE);
            GC_ASSERT(offset < VALID_OFFSET_SZ);
            GC_register_displacement(offset);
        }
        result += offset;
    }
    GC_ASSERT(((word)result & align_m1) == 0);
    return result;
}

/* This one exists largely to redirect posix_memalign for leaks finding. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
  /* Check alignment properly.  */
  size_t align_minus_one = align - 1; /* to workaround a cppcheck warning */
  if (align < sizeof(void *) || (align_minus_one & align) != 0) {
#   ifdef MSWINCE
      return ERROR_INVALID_PARAMETER;
#   else
      return EINVAL;
#   endif
  }

  if ((*memptr = GC_memalign(align, lb)) == NULL) {
#   ifdef MSWINCE
      return ERROR_NOT_ENOUGH_MEMORY;
#   else
      return ENOMEM;
#   endif
  }
  return 0;
}

#ifndef GC_NO_VALLOC
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_valloc(size_t lb)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_real_page_size != 0);
    return GC_memalign(GC_real_page_size, lb);
  }

  GC_API GC_ATTR_MALLOC void * GC_CALL GC_pvalloc(size_t lb)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_real_page_size != 0);
    lb = SIZET_SAT_ADD(lb, GC_real_page_size - 1) & ~(GC_real_page_size - 1);
    return GC_memalign(GC_real_page_size, lb);
  }
#endif /* !GC_NO_VALLOC */

/* Provide a version of strdup() that uses the collector to allocate    */
/* the copy of the string.                                              */
GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *s)
{
  char *copy;
  size_t lb;
  if (s == NULL) return NULL;
  lb = strlen(s) + 1;
  copy = (char *)GC_malloc_atomic(lb);
  if (EXPECT(NULL == copy, FALSE)) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(s, copy, lb);
  return copy;
}

GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
{
  char *copy;
  size_t len = strlen(str); /* str is expected to be non-NULL  */
  if (EXPECT(len > size, FALSE))
    len = size;
  copy = (char *)GC_malloc_atomic(len + 1);
  if (EXPECT(NULL == copy, FALSE)) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  if (EXPECT(len > 0, TRUE))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}

#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API GC_ATTR_MALLOC wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = (wchar_t *)GC_malloc_atomic(lb);

    if (EXPECT(NULL == copy, FALSE)) {
#     ifndef MSWINCE
        errno = ENOMEM;
#     endif
      return NULL;
    }
    BCOPY(str, copy, lb);
    return copy;
  }
#endif /* GC_REQUIRE_WCSDUP */

#ifndef CPPCHECK
  GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
  {
    return GC_malloc(lb);
  }

  GC_API void GC_CALL GC_change_stubborn(const void *p)
  {
    UNUSED_ARG(p);
  }
#endif /* !CPPCHECK */

GC_API void GC_CALL GC_end_stubborn_change(const void *p)
{
  GC_dirty(p); /* entire object */
}

GC_API void GC_CALL GC_ptr_store_and_dirty(void *p, const void *q)
{
  *(const void **)p = q;
  GC_dirty(p);
  REACHABLE_AFTER_DIRTY(q);
}