
ivmai / bdwgc / build 1484 (push, travis-ci-com)

12 Apr 2023 08:54AM UTC coverage: 76.502% (+0.07%) from 76.429%

Commit by ivmai:
Do not add extra byte to large ignore-off-page objects

For ignore-off-page objects the client should guarantee a pointer
within the first heap block of the object, thus there is no need to add an
extra byte for such objects if the object size is at least one heap block.

* allchblk.c (setup_header): Add assertion that byte_sz is not less
than ALIGNMENT.
* allchblk.c [ALIGNMENT>GC_DS_TAGS] (setup_header): Modify descr local
variable to make it zero if IGNORE_OFF_PAGE flag is set and kind is
NORMAL (and object size is not less than HBLKSIZE); add comment.
* mallocx.c [ALIGNMENT>GC_DS_TAGS] (GC_realloc): Likewise.
* include/gc/gc.h (GC_all_interior_pointers): Update comment.
* include/private/gc_priv.h [MAX_EXTRA_BYTES==0] (ADD_EXTRA_BYTES):
Define as no-op.
* malloc.c (GC_generic_malloc_inner): Define lb_adjusted local
variable; pass lb_adjusted to GC_alloc_large_and_clear().
* malloc.c [MAX_EXTRA_BYTES>0] (GC_generic_malloc_inner): Set
lb_adjusted to lb if IGNORE_OFF_PAGE flag is set and lb is not less
than HBLKSIZE.
* malloc.c [MAX_EXTRA_BYTES>0] (GC_generic_malloc_aligned): Set
lb_rounded without EXTRA_BYTES added (and compute lg based on
lb_rounded) if IGNORE_OFF_PAGE is set and lb is not less than HBLKSIZE.
* mallocx.c (GC_realloc): Define ok local variable.
* typd_mlc.c (GC_malloc_explicitly_typed_ignore_off_page): Remove
lb_adjusted local variable; call GC_malloc_explicitly_typed() if
lb is smaller than HBLKSIZE-sizeof(word), otherwise pass lb plus
sizeof(word) (instead of lb plus TYPD_EXTRA_BYTES) to
GC_generic_malloc_aligned; add comment.
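
The sizing rule described by this entry can be illustrated with a minimal,
self-contained C sketch.  It does not use the real collector internals:
HBLKSIZE, EXTRA_BYTES and the IGNORE_OFF_PAGE value below are stand-in
constants chosen only for the example, and adjusted_request() is a
hypothetical helper, not a bdwgc function.

#include <stdio.h>
#include <stddef.h>

/* Stand-in values for illustration only; the real ones are defined      */
/* in the collector's private headers.                                    */
#define HBLKSIZE        4096u  /* heap block size                         */
#define EXTRA_BYTES     1u     /* padding normally added to a request     */
#define IGNORE_OFF_PAGE 0x1u   /* "pointer kept within first block" flag  */

/* Hypothetical helper: for an ignore-off-page request of at least one    */
/* heap block, skip the extra byte, since the client guarantees a pointer */
/* within the first heap block of the object anyway.                      */
static size_t adjusted_request(size_t lb, unsigned flags)
{
    if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE)
        return lb;               /* large ignore-off-page: no padding     */
    return lb + EXTRA_BYTES;     /* every other request keeps the pad     */
}

int main(void)
{
    printf("%zu\n", adjusted_request(8192, IGNORE_OFF_PAGE)); /* 8192 */
    printf("%zu\n", adjusted_request(8192, 0));               /* 8193 */
    printf("%zu\n", adjusted_request(64, IGNORE_OFF_PAGE));   /* 65   */
    return 0;
}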

24 of 24 new or added lines in 4 files covered. (100.0%)

7765 of 10150 relevant lines covered (76.5%)

8458785.63 hits per line

Source File

/alloc.c (77.51% of lines covered)
1
/*
2
 * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
6
 * Copyright (c) 2008-2022 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 *
17
 */
18

19
#include "private/gc_priv.h"
20

21
#if !defined(MACOS) && !defined(MSWINCE)
22
# include <signal.h>
23
# if !defined(GC_NO_TYPES) && !defined(SN_TARGET_PSP2) \
24
     && !defined(__CC_ARM)
25
#   include <sys/types.h>
26
# endif
27
#endif
28

29
/*
30
 * Separate free lists are maintained for different sized objects
31
 * up to MAXOBJBYTES.
32
 * The call GC_allocobj(i,k) ensures that the freelist for
33
 * kind k objects of size i points to a non-empty
34
 * free list. It returns a pointer to the first entry on the free list.
35
 * In a single-threaded world, GC_allocobj may be called to allocate
36
 * an object of small size lb (and NORMAL kind) as follows
37
 * (GC_generic_malloc_inner is a wrapper over GC_allocobj which also
38
 * fills in GC_size_map if needed):
39
 *
40
 *   lg = GC_size_map[lb];
41
 *   op = GC_objfreelist[lg];
42
 *   if (NULL == op) {
43
 *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
44
 *   } else {
45
 *     GC_objfreelist[lg] = obj_link(op);
46
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
47
 *   }
48
 *
49
 * Note that this is very fast if the free list is non-empty; it should
50
 * only involve the execution of 4 or 5 simple instructions.
51
 * All composite objects on freelists are cleared, except for
52
 * their first word.
53
 */
54

55
/*
56
 * The allocator uses GC_allochblk to allocate large chunks of objects.
57
 * These chunks all start on addresses which are multiples of
58
 * HBLKSZ.   Each allocated chunk has an associated header,
59
 * which can be located quickly based on the address of the chunk.
60
 * (See headers.c for details.)
61
 * This makes it possible to check quickly whether an
62
 * arbitrary address corresponds to an object administered by the
63
 * allocator.
64
 */
65

66
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */
67

68
word GC_gc_no = 0;
69

70
#ifndef NO_CLOCK
71
  static unsigned long full_gc_total_time = 0; /* in ms, may wrap */
72
  static unsigned full_gc_total_ns_frac = 0; /* fraction of 1 ms */
73
  static GC_bool measure_performance = FALSE;
74
                /* Do performance measurements if set to true (e.g.,    */
75
                /* accumulation of the total time of full collections). */
76

77
  GC_API void GC_CALL GC_start_performance_measurement(void)
2✔
78
  {
79
    measure_performance = TRUE;
2✔
80
  }
2✔
81

82
  GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void)
2✔
83
  {
84
    return full_gc_total_time;
2✔
85
  }
86
#endif /* !NO_CLOCK */
87

88
#ifndef GC_DISABLE_INCREMENTAL
89
  GC_INNER GC_bool GC_incremental = FALSE; /* By default, stop the world. */
90
  STATIC GC_bool GC_should_start_incremental_collection = FALSE;
91
#endif
92

93
GC_API int GC_CALL GC_is_incremental_mode(void)
16✔
94
{
95
  return (int)GC_incremental;
16✔
96
}
97

98
#ifdef THREADS
99
  int GC_parallel = FALSE;      /* By default, parallel GC is off.      */
100
#endif
101

102
#if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
103
  int GC_full_freq = GC_FULL_FREQ;
104
#else
105
  int GC_full_freq = 19;   /* Every 20th collection is a full   */
106
                           /* collection, whether we need it    */
107
                           /* or not.                           */
108
#endif
109

110
STATIC GC_bool GC_need_full_gc = FALSE;
111
                           /* Need full GC due to heap growth.  */
112

113
#ifdef THREAD_LOCAL_ALLOC
114
  GC_INNER GC_bool GC_world_stopped = FALSE;
115
#endif
116

117
STATIC GC_bool GC_disable_automatic_collection = FALSE;
118

119
GC_API void GC_CALL GC_set_disable_automatic_collection(int value)
2✔
120
{
121
  LOCK();
2✔
122
  GC_disable_automatic_collection = (GC_bool)value;
2✔
123
  UNLOCK();
2✔
124
}
2✔
125

126
GC_API int GC_CALL GC_get_disable_automatic_collection(void)
2✔
127
{
128
  int value;
129

130
  LOCK();
2✔
131
  value = (int)GC_disable_automatic_collection;
2✔
132
  UNLOCK();
2✔
133
  return value;
2✔
134
}
135

136
STATIC word GC_used_heap_size_after_full = 0;
137

138
/* Version macros are now defined in gc_version.h, which is included by */
139
/* gc.h, which is included by gc_priv.h.                                */
140
#ifndef GC_NO_VERSION_VAR
141
  EXTERN_C_BEGIN
142
  extern const unsigned GC_version;
143
  EXTERN_C_END
144
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
145
                        (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
146
#endif
147

148
GC_API unsigned GC_CALL GC_get_version(void)
2✔
149
{
150
  return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
2✔
151
          GC_VERSION_MICRO;
152
}
153

154
/* some more variables */
155

156
#ifdef GC_DONT_EXPAND
157
  int GC_dont_expand = TRUE;
158
#else
159
  int GC_dont_expand = FALSE;
160
#endif
161

162
#if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
163
  word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /* must be > 0 */
164
#else
165
  word GC_free_space_divisor = 3;
166
#endif
167

168
GC_INNER int GC_CALLBACK GC_never_stop_func(void)
7,576,959✔
169
{
170
  return FALSE;
7,576,959✔
171
}
172

173
#if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
174
  unsigned long GC_time_limit = GC_TIME_LIMIT;
175
                           /* We try to keep pause times from exceeding  */
176
                           /* this by much. In milliseconds.             */
177
#elif defined(PARALLEL_MARK)
178
  unsigned long GC_time_limit = GC_TIME_UNLIMITED;
179
                        /* The parallel marker cannot be interrupted for */
180
                        /* now, so the time limit is absent by default.  */
181
#else
182
  unsigned long GC_time_limit = 15;
183
#endif
184

185
#ifndef NO_CLOCK
186
  STATIC unsigned long GC_time_lim_nsec = 0;
187
                        /* The nanoseconds add-on to GC_time_limit      */
188
                        /* value.  Not updated by GC_set_time_limit().  */
189
                        /* Ignored if the value of GC_time_limit is     */
190
                        /* GC_TIME_UNLIMITED.                           */
191

192
# define TV_NSEC_LIMIT (1000UL * 1000) /* amount of nanoseconds in 1 ms */
193

194
  GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s tv)
2✔
195
  {
196
    GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
2✔
197
    GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
2✔
198
    GC_time_limit = tv.tv_ms;
2✔
199
    GC_time_lim_nsec = tv.tv_nsec;
2✔
200
  }
2✔
201

202
  GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void)
2✔
203
  {
204
    struct GC_timeval_s tv;
205

206
    tv.tv_ms = GC_time_limit;
2✔
207
    tv.tv_nsec = GC_time_lim_nsec;
2✔
208
    return tv;
2✔
209
  }
210

211
  STATIC CLOCK_TYPE GC_start_time = CLOCK_TYPE_INITIALIZER;
212
                                /* Time at which we stopped world.      */
213
                                /* used only in GC_timeout_stop_func.   */
214
#endif /* !NO_CLOCK */
215

216
STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
217
                                /* collection within GC_time_limit.     */
218

219
STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
220
                                /* Accessed holding the allocator lock. */
221

222
GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
2✔
223
{
224
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
2✔
225
  LOCK();
2✔
226
  GC_default_stop_func = stop_func;
2✔
227
  UNLOCK();
2✔
228
}
2✔
229

230
GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
2✔
231
{
232
  GC_stop_func stop_func;
233

234
  LOCK();
2✔
235
  stop_func = GC_default_stop_func;
2✔
236
  UNLOCK();
2✔
237
  return stop_func;
2✔
238
}
239

240
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
241
# define GC_timeout_stop_func GC_default_stop_func
242
#else
243
  STATIC int GC_CALLBACK GC_timeout_stop_func(void)
6,695,407✔
244
  {
245
    CLOCK_TYPE current_time;
246
    static unsigned count = 0;
247
    unsigned long time_diff, nsec_diff;
248

249
    if (GC_default_stop_func())
6,695,407✔
250
      return TRUE;
6,695,407✔
251

252
    if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
6,695,407✔
253
      return FALSE;
6,695,407✔
254

255
    GET_TIME(current_time);
×
256
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
×
257
    nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
×
258
#   if defined(CPPCHECK)
259
      GC_noop1((word)&nsec_diff);
260
#   endif
261
    if (time_diff >= GC_time_limit
×
262
        && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
×
263
      GC_COND_LOG_PRINTF("Abandoning stopped marking after %lu ms %lu ns"
×
264
                         " (attempt %d)\n",
265
                         time_diff, nsec_diff, GC_n_attempts);
266
      return TRUE;
×
267
    }
268

269
    return FALSE;
×
270
  }
271
#endif /* !GC_DISABLE_INCREMENTAL */
272

273
#ifdef THREADS
274
  GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
275
#endif
276

277
static size_t min_bytes_allocd_minimum = 1;
278
                        /* The lowest value returned by min_bytes_allocd(). */
279

280
GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value)
2✔
281
{
282
    GC_ASSERT(value > 0);
2✔
283
    min_bytes_allocd_minimum = value;
2✔
284
}
2✔
285

286
GC_API size_t GC_CALL GC_get_min_bytes_allocd(void)
2✔
287
{
288
    return min_bytes_allocd_minimum;
2✔
289
}
290

291
/* Return the minimum number of bytes that must be allocated between    */
292
/* collections to amortize the collection cost.  Should be non-zero.    */
293
static word min_bytes_allocd(void)
44,972✔
294
{
295
    word result;
296
    word stack_size;
297
    word total_root_size;       /* includes double stack size,  */
298
                                /* since the stack is expensive */
299
                                /* to scan.                     */
300
    word scan_size;             /* Estimate of memory to be scanned     */
301
                                /* during normal GC.                    */
302

303
#   ifdef THREADS
304
      if (GC_need_to_lock) {
44,972✔
305
        /* We are multi-threaded... */
306
        stack_size = GC_total_stacksize;
18,822✔
307
        /* For now, we just use the value computed during the latest GC. */
308
#       ifdef DEBUG_THREADS
309
          GC_log_printf("Total stacks size: %lu\n",
310
                        (unsigned long)stack_size);
311
#       endif
312
      } else
313
#   endif
314
    /* else*/ {
315
#     ifdef STACK_NOT_SCANNED
316
        stack_size = 0;
317
#     elif defined(STACK_GROWS_UP)
318
        stack_size = GC_approx_sp() - GC_stackbottom;
319
#     else
320
        stack_size = GC_stackbottom - GC_approx_sp();
26,150✔
321
#     endif
322
    }
323

324
    total_root_size = 2 * stack_size + GC_root_size;
44,972✔
325
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
44,972✔
326
                + total_root_size;
327
    result = scan_size / GC_free_space_divisor;
44,972✔
328
    if (GC_incremental) {
44,972✔
329
      result /= 2;
34,721✔
330
    }
331
    return result > min_bytes_allocd_minimum
44,972✔
332
            ? result : min_bytes_allocd_minimum;
44,972✔
333
}
334

335
STATIC word GC_non_gc_bytes_at_gc = 0;
336
                /* Number of explicitly managed bytes of storage        */
337
                /* at last collection.                                  */
338

339
/* Return the number of bytes allocated, adjusted for explicit storage  */
340
/* management, etc..  This number is used in deciding when to trigger   */
341
/* collections.                                                         */
342
STATIC word GC_adj_bytes_allocd(void)
9,805,401✔
343
{
344
    signed_word result;
345
    signed_word expl_managed = (signed_word)GC_non_gc_bytes
19,610,802✔
346
                                - (signed_word)GC_non_gc_bytes_at_gc;
9,805,401✔
347

348
    /* Don't count what was explicitly freed, or newly allocated for    */
349
    /* explicit management.  Note that deallocating an explicitly       */
350
    /* managed object should not alter result, assuming the client      */
351
    /* is playing by the rules.                                         */
352
    result = (signed_word)GC_bytes_allocd
19,610,802✔
353
             + (signed_word)GC_bytes_dropped
9,805,401✔
354
             - (signed_word)GC_bytes_freed
9,805,401✔
355
             + (signed_word)GC_finalizer_bytes_freed
9,805,401✔
356
             - expl_managed;
357
    if (result > (signed_word)GC_bytes_allocd) {
9,805,401✔
358
        result = GC_bytes_allocd;
1,604,261✔
359
        /* probably client bug or unfortunate scheduling */
360
    }
361
    result += GC_bytes_finalized;
9,805,401✔
362
        /* We count objects enqueued for finalization as though they    */
363
        /* had been reallocated this round. Finalization is user        */
364
        /* visible progress.  And if we don't count this, we have       */
365
        /* stability problems for programs that finalize all objects.   */
366
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
9,805,401✔
367
        /* Always count at least 1/8 of the allocations.  We don't want */
368
        /* to collect too infrequently, since that would inhibit        */
369
        /* coalescing of free storage blocks.                           */
370
        /* This also makes us partially robust against client bugs.     */
371
        result = (signed_word)(GC_bytes_allocd >> 3);
871✔
372
    }
373
    return (word)result;
9,805,401✔
374
}
375

376

377
/* Clear up a few frames worth of garbage left at the top of the stack. */
378
/* This is used to prevent us from accidentally treating garbage left   */
379
/* on the stack by other parts of the collector as roots.  This         */
380
/* differs from the code in misc.c, which actually tries to keep the    */
381
/* stack clear of long-lived, client-generated garbage.                 */
382
STATIC void GC_clear_a_few_frames(void)
29,450✔
383
{
384
#   ifndef CLEAR_NWORDS
385
#     define CLEAR_NWORDS 64
386
#   endif
387
    volatile word frames[CLEAR_NWORDS];
388
    BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
29,450✔
389
}
29,450✔
390

391
GC_API void GC_CALL GC_start_incremental_collection(void)
×
392
{
393
# ifndef GC_DISABLE_INCREMENTAL
394
    if (!GC_incremental) return;
×
395

396
    LOCK();
×
397
    GC_should_start_incremental_collection = TRUE;
×
398
    ENTER_GC();
×
399
    GC_collect_a_little_inner(1);
×
400
    EXIT_GC();
×
401
    UNLOCK();
×
402
# endif
403
}
404

405
/* Have we allocated enough to amortize a collection? */
406
GC_INNER GC_bool GC_should_collect(void)
9,811,187✔
407
{
408
    static word last_min_bytes_allocd;
409
    static word last_gc_no;
410

411
    GC_ASSERT(I_HOLD_LOCK());
9,811,187✔
412
    if (last_gc_no != GC_gc_no) {
9,811,187✔
413
      last_min_bytes_allocd = min_bytes_allocd();
28,962✔
414
      last_gc_no = GC_gc_no;
28,962✔
415
    }
416
# ifndef GC_DISABLE_INCREMENTAL
417
    if (GC_should_start_incremental_collection) {
9,811,187✔
418
      GC_should_start_incremental_collection = FALSE;
×
419
      return TRUE;
×
420
    }
421
# endif
422
    if (GC_disable_automatic_collection) return FALSE;
9,811,187✔
423

424
    if (GC_last_heap_growth_gc_no == GC_gc_no)
9,811,187✔
425
      return TRUE; /* avoid expanding past limits used by blacklisting  */
5,786✔
426

427
    return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
9,805,401✔
428
}
429

430
/* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
431
                        /* Called at start of full collections.         */
432
                        /* Not called if 0.  Called with the allocation */
433
                        /* lock held.  Not used by GC itself.           */
434

435
GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
2✔
436
{
437
    LOCK();
2✔
438
    GC_start_call_back = fn;
2✔
439
    UNLOCK();
2✔
440
}
2✔
441

442
GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
2✔
443
{
444
    GC_start_callback_proc fn;
445

446
    LOCK();
2✔
447
    fn = GC_start_call_back;
2✔
448
    UNLOCK();
2✔
449
    return fn;
2✔
450
}
451

452
GC_INLINE void GC_notify_full_gc(void)
13,865✔
453
{
454
    if (GC_start_call_back != 0) {
13,865✔
455
        (*GC_start_call_back)();
×
456
    }
457
}
13,865✔
458

459
STATIC GC_bool GC_is_full_gc = FALSE;
460

461
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
462
STATIC void GC_finish_collection(void);
463

464
/* Initiate a garbage collection if appropriate.  Choose judiciously    */
465
/* between partial, full, and stop-world collections.                   */
466
STATIC void GC_maybe_gc(void)
8,025,476✔
467
{
468
  static int n_partial_gcs = 0;
469

470
  GC_ASSERT(I_HOLD_LOCK());
8,025,476✔
471
  ASSERT_CANCEL_DISABLED();
8,025,476✔
472
  if (!GC_should_collect()) return;
8,025,476✔
473

474
  if (!GC_incremental) {
17,011✔
475
    GC_gcollect_inner();
×
476
    return;
×
477
  }
478

479
  GC_ASSERT(!GC_collection_in_progress());
17,011✔
480
# ifdef PARALLEL_MARK
481
    if (GC_parallel)
17,011✔
482
      GC_wait_for_reclaim();
8,903✔
483
# endif
484
  if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
17,011✔
485
    GC_COND_LOG_PRINTF(
1,426✔
486
                "***>Full mark for collection #%lu after %lu allocd bytes\n",
487
                (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
488
    GC_promote_black_lists();
1,426✔
489
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
1,426✔
490
    GC_notify_full_gc();
1,426✔
491
    GC_clear_marks();
1,426✔
492
    n_partial_gcs = 0;
1,426✔
493
    GC_is_full_gc = TRUE;
1,426✔
494
  } else {
495
    n_partial_gcs++;
15,585✔
496
  }
497

498
  /* Try to mark with the world stopped.  If we run out of      */
499
  /* time, this turns into an incremental marking.              */
500
# ifndef NO_CLOCK
501
    if (GC_time_limit != GC_TIME_UNLIMITED) GET_TIME(GC_start_time);
17,011✔
502
# endif
503
  if (GC_stopped_mark(GC_timeout_stop_func)) {
17,011✔
504
#   ifdef SAVE_CALL_CHAIN
505
      GC_save_callers(GC_last_stack);
506
#   endif
507
    GC_finish_collection();
17,020✔
508
  } else if (!GC_is_full_gc) {
×
509
    /* Count this as the first attempt. */
510
    GC_n_attempts++;
×
511
  }
512
}
513

514
STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
515

516
GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc fn)
2✔
517
{
518
    /* fn may be 0 (means no event notifier). */
519
    LOCK();
2✔
520
    GC_on_collection_event = fn;
2✔
521
    UNLOCK();
2✔
522
}
2✔
523

524
GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void)
2✔
525
{
526
    GC_on_collection_event_proc fn;
527

528
    LOCK();
2✔
529
    fn = GC_on_collection_event;
2✔
530
    UNLOCK();
2✔
531
    return fn;
2✔
532
}
533

534
/* Stop the world garbage collection.  If stop_func is not      */
535
/* GC_never_stop_func then abort if stop_func returns TRUE.     */
536
/* Return TRUE if we successfully completed the collection.     */
537
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
12,667✔
538
{
539
#   ifndef NO_CLOCK
540
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
12,667✔
541
      GC_bool start_time_valid;
542
#   endif
543

544
    ASSERT_CANCEL_DISABLED();
12,667✔
545
    GC_ASSERT(I_HOLD_LOCK());
12,667✔
546
    GC_ASSERT(GC_is_initialized);
12,667✔
547
    if (GC_dont_gc || (*stop_func)()) return FALSE;
25,334✔
548
    if (GC_on_collection_event)
12,439✔
549
      GC_on_collection_event(GC_EVENT_START);
×
550
    if (GC_incremental && GC_collection_in_progress()) {
12,439✔
551
      GC_COND_LOG_PRINTF(
×
552
            "GC_try_to_collect_inner: finishing collection in progress\n");
553
      /* Just finish collection already in progress.    */
554
        while(GC_collection_in_progress()) {
×
555
            if ((*stop_func)()) {
×
556
              /* TODO: Notify GC_EVENT_ABANDON */
557
              return FALSE;
×
558
            }
559
            ENTER_GC();
×
560
            GC_collect_a_little_inner(1);
×
561
            EXIT_GC();
×
562
        }
563
    }
564
    GC_notify_full_gc();
12,439✔
565
#   ifndef NO_CLOCK
566
      start_time_valid = FALSE;
12,439✔
567
      if ((GC_print_stats | (int)measure_performance) != 0) {
12,439✔
568
        if (GC_print_stats)
2,088✔
569
          GC_log_printf("Initiating full world-stop collection!\n");
×
570
        start_time_valid = TRUE;
2,088✔
571
        GET_TIME(start_time);
2,088✔
572
      }
573
#   endif
574
    GC_promote_black_lists();
12,439✔
575
    /* Make sure all blocks have been reclaimed, so sweep routines      */
576
    /* don't see cleared mark bits.                                     */
577
    /* If we're guaranteed to finish, then this is unnecessary.         */
578
    /* In the find_leak case, we have to finish to guarantee that       */
579
    /* previously unmarked objects are not reported as leaks.           */
580
#       ifdef PARALLEL_MARK
581
          if (GC_parallel)
12,439✔
582
            GC_wait_for_reclaim();
4,095✔
583
#       endif
584
        if ((GC_find_leak || stop_func != GC_never_stop_func)
12,439✔
585
            && !GC_reclaim_all(stop_func, FALSE)) {
66✔
586
            /* Aborted.  So far everything is still consistent. */
587
            /* TODO: Notify GC_EVENT_ABANDON */
588
            return FALSE;
×
589
        }
590
    GC_invalidate_mark_state();  /* Flush mark stack.   */
12,439✔
591
    GC_clear_marks();
12,439✔
592
#   ifdef SAVE_CALL_CHAIN
593
        GC_save_callers(GC_last_stack);
594
#   endif
595
    GC_is_full_gc = TRUE;
12,439✔
596
    if (!GC_stopped_mark(stop_func)) {
12,439✔
597
      if (!GC_incremental) {
×
598
        /* We're partially done and have no way to complete or use      */
599
        /* current work.  Reestablish invariants as cheaply as          */
600
        /* possible.                                                    */
601
        GC_invalidate_mark_state();
×
602
        GC_unpromote_black_lists();
×
603
      } /* else we claim the world is already still consistent.  We'll  */
604
        /* finish incrementally.                                        */
605
      /* TODO: Notify GC_EVENT_ABANDON */
606
      return FALSE;
×
607
    }
608
    GC_finish_collection();
12,439✔
609
#   ifndef NO_CLOCK
610
      if (start_time_valid) {
12,439✔
611
        CLOCK_TYPE current_time;
612
        unsigned long time_diff, ns_frac_diff;
613

614
        GET_TIME(current_time);
2,088✔
615
        time_diff = MS_TIME_DIFF(current_time, start_time);
2,088✔
616
        ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
2,088✔
617
        if (measure_performance) {
2,088✔
618
          full_gc_total_time += time_diff; /* may wrap */
2,088✔
619
          full_gc_total_ns_frac += (unsigned)ns_frac_diff;
2,088✔
620
          if (full_gc_total_ns_frac >= 1000000U) {
2,088✔
621
            /* Overflow of the nanoseconds part. */
622
            full_gc_total_ns_frac -= 1000000U;
1,050✔
623
            full_gc_total_time++;
1,050✔
624
          }
625
        }
626
        if (GC_print_stats)
2,088✔
627
          GC_log_printf("Complete collection took %lu ms %lu ns\n",
×
628
                        time_diff, ns_frac_diff);
629
      }
630
#   endif
631
    if (GC_on_collection_event)
12,439✔
632
      GC_on_collection_event(GC_EVENT_END);
×
633
    return TRUE;
12,439✔
634
}
635

636
/* The number of extra calls to GC_mark_some that we have made. */
637
STATIC int GC_deficit = 0;
638

639
/* The default value of GC_rate.        */
640
#ifndef GC_RATE
641
# define GC_RATE 10
642
#endif
643

644
/* When GC_collect_a_little_inner() performs n units of GC work, a unit */
645
/* is intended to touch roughly GC_rate pages.  (But, every once in     */
646
/* a while, we do more than that.)  This needs to be a fairly large     */
647
/* number with our current incremental GC strategy, since otherwise we  */
648
/* allocate too much during GC, and the cleanup gets expensive.         */
649
STATIC int GC_rate = GC_RATE;
650

651
GC_API void GC_CALL GC_set_rate(int value)
2✔
652
{
653
    GC_ASSERT(value > 0);
2✔
654
    GC_rate = value;
2✔
655
}
2✔
656

657
GC_API int GC_CALL GC_get_rate(void)
2✔
658
{
659
    return GC_rate;
2✔
660
}
661

662
/* The default maximum number of prior attempts at world stop marking.  */
663
#ifndef MAX_PRIOR_ATTEMPTS
664
# define MAX_PRIOR_ATTEMPTS 3
665
#endif
666

667
/* The maximum number of prior attempts at world stop marking.          */
668
/* A value of 1 means that we finish the second time, no matter how     */
669
/* long it takes.  Does not count the initial root scan for a full GC.  */
670
static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
671

672
GC_API void GC_CALL GC_set_max_prior_attempts(int value)
2✔
673
{
674
    GC_ASSERT(value >= 0);
2✔
675
    max_prior_attempts = value;
2✔
676
}
2✔
677

678
GC_API int GC_CALL GC_get_max_prior_attempts(void)
2✔
679
{
680
    return max_prior_attempts;
2✔
681
}
682

683
GC_INNER void GC_collect_a_little_inner(int n)
9,072,560✔
684
{
685
    IF_CANCEL(int cancel_state;)
686

687
    GC_ASSERT(I_HOLD_LOCK());
9,072,560✔
688
    GC_ASSERT(GC_is_initialized);
9,072,560✔
689
    if (GC_dont_gc) return;
9,072,560✔
690

691
    DISABLE_CANCEL(cancel_state);
8,025,418✔
692
    if (GC_incremental && GC_collection_in_progress()) {
8,025,418✔
693
        int i;
694
        int max_deficit = GC_rate * n;
×
695

696
#       ifdef PARALLEL_MARK
697
            if (GC_time_limit != GC_TIME_UNLIMITED)
×
698
                GC_parallel_mark_disabled = TRUE;
×
699
#       endif
700
        for (i = GC_deficit; i < max_deficit; i++) {
×
701
            if (GC_mark_some(NULL))
×
702
                break;
×
703
        }
704
#       ifdef PARALLEL_MARK
705
            GC_parallel_mark_disabled = FALSE;
×
706
#       endif
707

708
        if (i < max_deficit) {
×
709
            GC_ASSERT(!GC_collection_in_progress());
×
710
            /* Need to follow up with a full collection.        */
711
#           ifdef SAVE_CALL_CHAIN
712
                GC_save_callers(GC_last_stack);
713
#           endif
714
#           ifdef PARALLEL_MARK
715
                if (GC_parallel)
×
716
                    GC_wait_for_reclaim();
×
717
#           endif
718
#           ifndef NO_CLOCK
719
                if (GC_time_limit != GC_TIME_UNLIMITED
×
720
                        && GC_n_attempts < max_prior_attempts)
×
721
                    GET_TIME(GC_start_time);
×
722
#           endif
723
            if (GC_stopped_mark(GC_n_attempts < max_prior_attempts ?
×
724
                                GC_timeout_stop_func : GC_never_stop_func)) {
725
                GC_finish_collection();
×
726
            } else {
727
                GC_n_attempts++;
×
728
            }
729
        }
730
        if (GC_deficit > 0) {
×
731
            GC_deficit -= max_deficit;
×
732
            if (GC_deficit < 0)
×
733
                GC_deficit = 0;
×
734
        }
735
    } else {
736
        GC_maybe_gc();
8,025,418✔
737
    }
738
    RESTORE_CANCEL(cancel_state);
8,025,432✔
739
}
740

741
GC_INNER void (*GC_check_heap)(void) = 0;
742
GC_INNER void (*GC_print_all_smashed)(void) = 0;
743

744
GC_API int GC_CALL GC_collect_a_little(void)
5,252,467✔
745
{
746
    int result;
747

748
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
5,252,467✔
749
    LOCK();
5,252,548✔
750
    ENTER_GC();
5,252,551✔
751
    GC_collect_a_little_inner(1);
5,252,551✔
752
    EXIT_GC();
5,252,551✔
753
    result = (int)GC_collection_in_progress();
5,252,551✔
754
    UNLOCK();
5,252,551✔
755
    if (!result && GC_debugging_started) GC_print_all_smashed();
5,252,538✔
756
    return result;
5,252,538✔
757
}
758

759
#ifndef NO_CLOCK
760
  /* Variables for world-stop average delay time statistic computation. */
761
  /* "divisor" is incremented every world-stop and halved when reached  */
762
  /* its maximum (or upon "total_time" overflow).                       */
763
  static unsigned world_stopped_total_time = 0;
764
  static unsigned world_stopped_total_divisor = 0;
765
# ifndef MAX_TOTAL_TIME_DIVISOR
766
    /* We shall not use big values here (so "outdated" delay time       */
767
    /* values would have less impact on "average" delay time value than */
768
    /* newer ones).                                                     */
769
#   define MAX_TOTAL_TIME_DIVISOR 1000
770
# endif
771
#endif /* !NO_CLOCK */
772

773
#ifdef USE_MUNMAP
774
# ifndef MUNMAP_THRESHOLD
775
#   define MUNMAP_THRESHOLD 7
776
# endif
777
  GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;
778

779
# define IF_USE_MUNMAP(x) x
780
# define COMMA_IF_USE_MUNMAP(x) /* comma */, x
781
#else
782
# define IF_USE_MUNMAP(x) /* empty */
783
# define COMMA_IF_USE_MUNMAP(x) /* empty */
784
#endif
785

786
/* We stop the world and mark from all roots.  If stop_func() ever      */
787
/* returns TRUE, we may fail and return FALSE.  Increment GC_gc_no if   */
788
/* we succeed.                                                          */
789
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
29,449✔
790
{
791
    int i;
792
    ptr_t cold_gc_frame = GC_approx_sp();
29,449✔
793
#   ifndef NO_CLOCK
794
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
29,449✔
795
#   endif
796

797
    GC_ASSERT(I_HOLD_LOCK());
29,449✔
798
    GC_ASSERT(GC_is_initialized);
29,449✔
799
#   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
800
        GC_add_current_malloc_heap();
801
#   endif
802
#   if defined(REGISTER_LIBRARIES_EARLY)
803
        GC_cond_register_dynamic_libraries();
29,449✔
804
#   endif
805

806
#   ifndef NO_CLOCK
807
      if (GC_PRINT_STATS_FLAG)
29,449✔
808
        GET_TIME(start_time);
×
809
#   endif
810

811
#   if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
812
      GC_process_togglerefs();
29,449✔
813
#   endif
814
#   ifdef THREADS
815
      if (GC_on_collection_event)
29,449✔
816
        GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
×
817
#   endif
818
    STOP_WORLD();
29,449✔
819
#   ifdef THREADS
820
      if (GC_on_collection_event)
29,449✔
821
        GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
×
822
#   endif
823

824
#   ifdef THREAD_LOCAL_ALLOC
825
      GC_world_stopped = TRUE;
29,449✔
826
#   endif
827
        /* Output blank line for convenience here */
828
    GC_COND_LOG_PRINTF(
29,449✔
829
              "\n--> Marking for collection #%lu after %lu allocated bytes\n",
830
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
×
831
#   ifdef MAKE_BACK_GRAPH
832
      if (GC_print_back_height) {
833
        GC_build_back_graph();
834
      }
835
#   endif
836

837
    /* Mark from all roots.  */
838
        if (GC_on_collection_event)
29,449✔
839
          GC_on_collection_event(GC_EVENT_MARK_START);
×
840

841
        /* Minimize junk left in my registers and on the stack */
842
            GC_clear_a_few_frames();
29,449✔
843
            GC_noop6(0,0,0,0,0,0);
29,449✔
844

845
        GC_initiate_gc();
29,449✔
846
#       ifdef PARALLEL_MARK
847
          if (stop_func != GC_never_stop_func)
29,449✔
848
            GC_parallel_mark_disabled = TRUE;
17,010✔
849
#       endif
850
        for (i = 0; !(*stop_func)(); i++) {
7,563,830✔
851
          if (GC_mark_some(cold_gc_frame)) {
7,563,830✔
852
#           ifdef PARALLEL_MARK
853
              if (GC_parallel && GC_parallel_mark_disabled) {
29,449✔
854
                GC_COND_LOG_PRINTF("Stopped marking done after %d iterations"
8,902✔
855
                                   " with disabled parallel marker\n", i);
856
              }
857
#           endif
858
            i = -1;
29,449✔
859
            break;
29,449✔
860
          }
861
        }
862
#       ifdef PARALLEL_MARK
863
          GC_parallel_mark_disabled = FALSE;
29,449✔
864
#       endif
865

866
        if (i >= 0) {
29,449✔
867
          GC_COND_LOG_PRINTF("Abandoned stopped marking after"
×
868
                             " %d iterations\n", i);
869
          GC_deficit = i;       /* Give the mutator a chance.   */
×
870
#         ifdef THREAD_LOCAL_ALLOC
871
            GC_world_stopped = FALSE;
×
872
#         endif
873

874
#         ifdef THREADS
875
            if (GC_on_collection_event)
×
876
              GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
877
#         endif
878

879
          START_WORLD();
×
880

881
#         ifdef THREADS
882
            if (GC_on_collection_event)
×
883
              GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
884
#         endif
885

886
          /* TODO: Notify GC_EVENT_MARK_ABANDON */
887
          return FALSE;
29,459✔
888
        }
889

890
    GC_gc_no++;
29,449✔
891
    /* Check all debugged objects for consistency */
892
    if (GC_debugging_started) {
29,449✔
893
      (*GC_check_heap)();
248✔
894
    }
895
    if (GC_on_collection_event) {
29,449✔
896
      GC_on_collection_event(GC_EVENT_MARK_END);
×
897
#     ifdef THREADS
898
        GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
899
#     endif
900
    }
901
#   ifdef THREAD_LOCAL_ALLOC
902
      GC_world_stopped = FALSE;
29,449✔
903
#   endif
904

905
    START_WORLD();
29,449✔
906

907
#   ifdef THREADS
908
      if (GC_on_collection_event)
29,459✔
909
        GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
910
#   endif
911

912
#   ifndef NO_CLOCK
913
      if (GC_PRINT_STATS_FLAG) {
29,459✔
914
        unsigned long time_diff;
915
        unsigned total_time, divisor;
916
        CLOCK_TYPE current_time;
917

918
        GET_TIME(current_time);
×
919
        time_diff = MS_TIME_DIFF(current_time,start_time);
×
920

921
        /* Compute new world-stop delay total time */
922
        total_time = world_stopped_total_time;
×
923
        divisor = world_stopped_total_divisor;
×
924
        if (total_time > (((unsigned)-1) >> 1)
×
925
            || divisor >= MAX_TOTAL_TIME_DIVISOR) {
×
926
          /* Halve values if overflow occurs */
927
          total_time >>= 1;
×
928
          divisor >>= 1;
×
929
        }
930
        total_time += time_diff < (((unsigned)-1) >> 1) ?
×
931
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
×
932
        /* Update old world_stopped_total_time and its divisor */
933
        world_stopped_total_time = total_time;
×
934
        world_stopped_total_divisor = ++divisor;
×
935

936
        GC_ASSERT(divisor != 0);
×
937
        GC_log_printf("World-stopped marking took %lu ms %lu ns"
×
938
                      " (%u ms in average)\n",
939
                      time_diff, NS_FRAC_TIME_DIFF(current_time, start_time),
×
940
                      total_time / divisor);
941
      }
942
#   endif
943
    return TRUE;
29,459✔
944
}
945

946
/* Set all mark bits for the free list whose first entry is q   */
947
GC_INNER void GC_set_fl_marks(ptr_t q)
1,245,080✔
948
{
949
    if (q /* != NULL */) { /* CPPCHECK */
1,245,080✔
950
      struct hblk *h = HBLKPTR(q);
1,245,080✔
951
      struct hblk *last_h = h;
1,245,080✔
952
      hdr *hhdr = HDR(h);
1,245,080✔
953
      IF_PER_OBJ(word sz = hhdr->hb_sz;)
954

955
      for (;;) {
956
        word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
35,595,689✔
957

958
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
35,595,689✔
959
          set_mark_bit_from_hdr(hhdr, bit_no);
16,192,010✔
960
          ++hhdr -> hb_n_marks;
16,192,010✔
961
        }
962

963
        q = (ptr_t)obj_link(q);
35,595,689✔
964
        if (q == NULL)
35,595,689✔
965
          break;
1,245,080✔
966

967
        h = HBLKPTR(q);
34,350,609✔
968
        if (h != last_h) {
34,350,609✔
969
          last_h = h;
54,280✔
970
          hhdr = HDR(h);
54,280✔
971
          IF_PER_OBJ(sz = hhdr->hb_sz;)
972
        }
973
      }
34,350,609✔
974
    }
975
}
1,245,080✔
976

977
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
978
  /* Check that all mark bits for the free list whose first entry is    */
979
  /* (*pfreelist) are set.  Check skipped if it points to a special value. */
980
  void GC_check_fl_marks(void **pfreelist)
20,791,294✔
981
  {
982
    /* TODO: There is a data race with GC_FAST_MALLOC_GRANS (which does */
983
    /* not do atomic updates to the free-list).  The race seems to be   */
984
    /* harmless, and for now we just skip this check in case of TSan.   */
985
#   if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
986
      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
20,791,294✔
987
                /* Atomic operations are used because the world is running. */
988
      AO_t *prev;
989
      AO_t *p;
990

991
      if ((word)list <= HBLKSIZE) return;
20,791,294✔
992

993
      prev = (AO_t *)pfreelist;
1,244,972✔
994
      for (p = list; p != NULL;) {
38,089,675✔
995
        AO_t *next;
996

997
        if (!GC_is_marked(p)) {
35,599,729✔
998
          ABORT_ARG2("Unmarked local free list entry",
×
999
                     ": object %p on list %p", (void *)p, (void *)list);
1000
        }
1001

1002
        /* While traversing the free-list, it re-reads the pointer to   */
1003
        /* the current node before accepting its next pointer and       */
1004
        /* bails out if the latter has changed.  That way, it won't     */
1005
        /* try to follow the pointer which might be been modified       */
1006
        /* after the object was returned to the client.  It might       */
1007
        /* perform the mark-check on the just allocated object but      */
1008
        /* that should be harmless.                                     */
1009
        next = (AO_t *)AO_load_acquire_read(p);
35,599,731✔
1010
        if (AO_load(prev) != (AO_t)p)
35,599,731✔
1011
          break;
×
1012
        prev = p;
35,599,731✔
1013
        p = next;
35,599,731✔
1014
      }
1015
#   else
1016
      /* FIXME: Not implemented (just skipped). */
1017
      (void)pfreelist;
1018
#   endif
1019
  }
1020
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1021

1022
/* Clear all mark bits for the free list whose first entry is q */
1023
/* Decrement GC_bytes_found by number of bytes on free list.    */
1024
STATIC void GC_clear_fl_marks(ptr_t q)
50,476✔
1025
{
1026
      struct hblk *h = HBLKPTR(q);
50,476✔
1027
      struct hblk *last_h = h;
50,476✔
1028
      hdr *hhdr = HDR(h);
50,476✔
1029
      word sz = hhdr->hb_sz; /* Normally set only once. */
50,476✔
1030

1031
      for (;;) {
1032
        word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
4,749,931✔
1033

1034
        if (mark_bit_from_hdr(hhdr, bit_no)) {
4,749,931✔
1035
          size_t n_marks = hhdr -> hb_n_marks;
1,121,496✔
1036

1037
          GC_ASSERT(n_marks != 0);
1,121,496✔
1038
          clear_mark_bit_from_hdr(hhdr, bit_no);
1,121,496✔
1039
          n_marks--;
1,121,496✔
1040
#         ifdef PARALLEL_MARK
1041
            /* Appr. count, don't decrement to zero! */
1042
            if (0 != n_marks || !GC_parallel) {
1,121,496✔
1043
              hhdr -> hb_n_marks = n_marks;
1,120,812✔
1044
            }
1045
#         else
1046
            hhdr -> hb_n_marks = n_marks;
1047
#         endif
1048
        }
1049
        GC_bytes_found -= sz;
4,749,931✔
1050

1051
        q = (ptr_t)obj_link(q);
4,749,931✔
1052
        if (q == NULL)
4,749,931✔
1053
          break;
50,476✔
1054

1055
        h = HBLKPTR(q);
4,699,455✔
1056
        if (h != last_h) {
4,699,455✔
1057
          last_h = h;
489,610✔
1058
          hhdr = HDR(h);
489,610✔
1059
          sz = hhdr->hb_sz;
489,610✔
1060
        }
1061
      }
4,699,455✔
1062
}
50,476✔
1063

1064
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1065
  void GC_check_tls(void);
1066
#endif
1067

1068
GC_on_heap_resize_proc GC_on_heap_resize = 0;
1069

1070
/* Used for logging only. */
1071
GC_INLINE int GC_compute_heap_usage_percent(void)
×
1072
{
1073
  word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
×
1074
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
×
1075
# if defined(CPPCHECK)
1076
    word limit = (GC_WORD_MAX >> 1) / 50; /* to avoid a false positive */
1077
# else
1078
    const word limit = GC_WORD_MAX / 100;
×
1079
# endif
1080

1081
  return used >= heap_sz ? 0 : used < limit ?
×
1082
                (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
×
1083
}
1084

1085
#define GC_DBGLOG_PRINT_HEAP_IN_USE() \
1086
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1087
                   GC_compute_heap_usage_percent(), \
1088
                   TO_KiB_UL(GC_composite_in_use), \
1089
                   TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1090

1091
/* Finish up a collection.  Assumes mark bits are consistent, but the   */
1092
/* world is otherwise running.                                          */
1093
STATIC void GC_finish_collection(void)
29,459✔
1094
{
1095
#   ifndef NO_CLOCK
1096
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
29,459✔
1097
      CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
29,459✔
1098
#   endif
1099

1100
    GC_ASSERT(I_HOLD_LOCK());
29,459✔
1101
#   if defined(GC_ASSERTIONS) \
1102
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
1103
        /* Check that we marked some of our own data.           */
1104
        /* TODO: Add more checks. */
1105
        GC_check_tls();
29,459✔
1106
#   endif
1107

1108
#   ifndef NO_CLOCK
1109
      if (GC_print_stats)
29,461✔
1110
        GET_TIME(start_time);
×
1111
#   endif
1112
    if (GC_on_collection_event)
29,461✔
1113
      GC_on_collection_event(GC_EVENT_RECLAIM_START);
×
1114

1115
#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1116
      if (GC_bytes_found > 0)
29,461✔
1117
        GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
29,135✔
1118
#   endif
1119
    GC_bytes_found = 0;
29,461✔
1120
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1121
        if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
29,461✔
1122
          GC_print_address_map();
×
1123
        }
1124
#   endif
1125
    COND_DUMP;
29,461✔
1126
    if (GC_find_leak) {
29,461✔
1127
      /* Mark all objects on the free list.  All objects should be      */
1128
      /* marked when we're done.                                        */
1129
      word size;        /* current object size  */
1130
      unsigned kind;
1131
      ptr_t q;
1132

1133
      for (kind = 0; kind < GC_n_kinds; kind++) {
330✔
1134
        for (size = 1; size <= MAXOBJGRANULES; size++) {
34,056✔
1135
          q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
33,792✔
1136
          if (q != NULL)
33,792✔
1137
            GC_set_fl_marks(q);
539✔
1138
        }
1139
      }
1140
      GC_start_reclaim(TRUE);
66✔
1141
        /* The above just checks; it doesn't really reclaim anything.   */
1142
    }
1143

1144
#   ifndef GC_NO_FINALIZATION
1145
      GC_finalize();
29,461✔
1146
#   endif
1147
#   ifndef NO_CLOCK
1148
      if (GC_print_stats)
29,460✔
1149
        GET_TIME(finalize_time);
×
1150
#   endif
1151

1152
    if (GC_print_back_height) {
29,460✔
1153
#     ifdef MAKE_BACK_GRAPH
1154
        GC_traverse_back_graph();
1155
#     elif !defined(SMALL_CONFIG)
1156
        GC_err_printf("Back height not available: "
×
1157
                      "Rebuild collector with -DMAKE_BACK_GRAPH\n");
1158
#     endif
1159
    }
1160

1161
    /* Clear free list mark bits, in case they got accidentally marked   */
1162
    /* (or GC_find_leak is set and they were intentionally marked).      */
1163
    /* Also subtract memory remaining from GC_bytes_found count.         */
1164
    /* Note that composite objects on free list are cleared.             */
1165
    /* Thus accidentally marking a free list is not a problem;  only     */
1166
    /* objects on the list itself will be marked, and that's fixed here. */
1167
    {
1168
      word size;        /* current object size          */
1169
      ptr_t q;          /* pointer to current object    */
1170
      unsigned kind;
1171

1172
      for (kind = 0; kind < GC_n_kinds; kind++) {
163,547✔
1173
        for (size = 1; size <= MAXOBJGRANULES; size++) {
17,297,223✔
1174
          q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
17,163,136✔
1175
          if (q != NULL)
17,163,136✔
1176
            GC_clear_fl_marks(q);
50,476✔
1177
        }
1178
      }
1179
    }
1180

1181
    GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
29,460✔
1182
                          (long)GC_bytes_found);
1183

1184
    /* Reconstruct free lists to contain everything not marked */
1185
    GC_start_reclaim(FALSE);
29,460✔
1186

1187
#   ifdef USE_MUNMAP
1188
      if (GC_unmap_threshold > 0 /* unmapping enabled? */
29,462✔
1189
          && EXPECT(GC_gc_no != 1, TRUE)) /* do not unmap during GC init */
29,462✔
1190
        GC_unmap_old(GC_unmap_threshold);
29,430✔
1191

1192
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
29,462✔
1193
#   endif
1194
    GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
29,462✔
1195
    GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB ("
29,462✔
1196
                     IF_USE_MUNMAP("+ %lu KiB unmapped ")
1197
                     "+ %lu KiB internal)\n",
1198
                     (unsigned long)GC_gc_no, (long)GC_bytes_found,
1199
                     TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
×
1200
                     COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
×
1201
                     TO_KiB_UL(GC_our_mem_bytes - GC_heapsize
×
1202
                               + sizeof(GC_arrays)));
1203
    GC_DBGLOG_PRINT_HEAP_IN_USE();
29,462✔
1204
    if (GC_is_full_gc) {
29,462✔
1205
        GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
13,865✔
1206
        GC_need_full_gc = FALSE;
13,865✔
1207
    } else {
1208
        GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
31,194✔
1209
                          > min_bytes_allocd() + GC_large_free_bytes;
15,597✔
1210
    }
1211

1212
    /* Reset or increment counters for next cycle */
1213
    GC_n_attempts = 0;
29,462✔
1214
    GC_is_full_gc = FALSE;
29,462✔
1215
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
29,462✔
1216
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
29,462✔
1217
    GC_bytes_allocd = 0;
29,462✔
1218
    GC_bytes_dropped = 0;
29,462✔
1219
    GC_bytes_freed = 0;
29,462✔
1220
    GC_finalizer_bytes_freed = 0;
29,462✔
1221

1222
    if (GC_on_collection_event)
29,462✔
1223
      GC_on_collection_event(GC_EVENT_RECLAIM_END);
×
1224
#   ifndef NO_CLOCK
1225
      if (GC_print_stats) {
29,462✔
1226
        CLOCK_TYPE done_time;
1227

1228
        GET_TIME(done_time);
×
1229
#       if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1230
          /* A convenient place to output finalization statistics.      */
1231
          GC_print_finalization_stats();
×
1232
#       endif
1233
        GC_log_printf("Finalize and initiate sweep took %lu ms %lu ns"
×
1234
                      " + %lu ms %lu ns\n",
1235
                      MS_TIME_DIFF(finalize_time, start_time),
×
1236
                      NS_FRAC_TIME_DIFF(finalize_time, start_time),
×
1237
                      MS_TIME_DIFF(done_time, finalize_time),
×
1238
                      NS_FRAC_TIME_DIFF(done_time, finalize_time));
×
1239
      }
1240
#   elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1241
      if (GC_print_stats)
1242
        GC_print_finalization_stats();
1243
#   endif
1244
}
29,462✔
1245

1246
STATIC word GC_heapsize_at_forced_unmap = 0;
1247
                                /* accessed with the allocation lock held */
1248

1249
/* If stop_func == 0 then GC_default_stop_func is used instead.         */
1250
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
2,468✔
1251
                                         GC_bool force_unmap)
1252
{
1253
    GC_bool result;
1254
    IF_USE_MUNMAP(int old_unmap_threshold;)
1255
    IF_CANCEL(int cancel_state;)
1256

1257
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
2,468✔
1258
    if (GC_debugging_started) GC_print_all_smashed();
2,468✔
1259
    GC_INVOKE_FINALIZERS();
2,468✔
1260
    LOCK();
2,468✔
1261
    if (force_unmap) {
2,468✔
1262
      /* Record current heap size to make heap growth more conservative */
1263
      /* afterwards (as if the heap is growing from zero size again).   */
1264
      GC_heapsize_at_forced_unmap = GC_heapsize;
70✔
1265
    }
1266
    DISABLE_CANCEL(cancel_state);
2,468✔
1267
#   ifdef USE_MUNMAP
1268
      old_unmap_threshold = GC_unmap_threshold;
2,468✔
1269
      if (force_unmap ||
4,866✔
1270
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
2,398✔
1271
        GC_unmap_threshold = 1; /* unmap as much as possible */
70✔
1272
#   endif
1273
    ENTER_GC();
2,468✔
1274
    /* Minimize junk left in my registers */
1275
      GC_noop6(0,0,0,0,0,0);
2,468✔
1276
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
2,468✔
1277
                                     GC_default_stop_func);
1278
    EXIT_GC();
2,468✔
1279
    IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
2,468✔
1280
    RESTORE_CANCEL(cancel_state);
2,468✔
1281
    UNLOCK();
2,468✔
1282
    if (result) {
2,468✔
1283
        if (GC_debugging_started) GC_print_all_smashed();
2,240✔
1284
        GC_INVOKE_FINALIZERS();
2,240✔
1285
    }
1286
    return result;
2,468✔
1287
}
1288

1289
/* Externally callable routines to invoke full, stop-the-world collection. */
1290

1291
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
×
1292
{
1293
    GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
×
1294
    return (int)GC_try_to_collect_general(stop_func, FALSE);
×
1295
}
1296

1297
GC_API void GC_CALL GC_gcollect(void)
2,398✔
1298
{
1299
    /* 0 is passed as stop_func to get GC_default_stop_func value       */
1300
    /* while holding the allocation lock (to prevent data races).       */
1301
    (void)GC_try_to_collect_general(0, FALSE);
2,398✔
1302
    if (get_have_errors())
2,398✔
1303
      GC_print_all_errors();
56✔
1304
}
2,398✔
1305

1306
GC_API void GC_CALL GC_gcollect_and_unmap(void)
70✔
1307
{
1308
    /* Collect and force memory unmapping to OS. */
1309
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
70✔
1310
}
70✔
1311

1312
#ifdef USE_PROC_FOR_LIBRARIES
1313
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
1314
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
1315
  {
1316
    GC_ASSERT(I_HOLD_LOCK());
1317
    GC_ASSERT(p != NULL);
1318
    if (GC_n_memory >= MAX_HEAP_SECTS)
1319
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1320
    GC_our_memory[GC_n_memory].hs_start = p;
1321
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
1322
    GC_n_memory++;
1323
    GC_our_mem_bytes += bytes;
1324
  }
1325
#endif
1326

1327
/* Use the chunk of memory starting at p of size bytes as part of the heap. */
/* Assumes p is HBLKSIZE aligned, bytes argument is a multiple of HBLKSIZE. */
STATIC void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;
    size_t old_capacity = 0;
    void *old_heap_sects = NULL;
#   ifdef GC_ASSERTIONS
      unsigned i;
#   endif

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)p % HBLKSIZE == 0);
    GC_ASSERT(bytes % HBLKSIZE == 0);
    GC_ASSERT(bytes > 0);
    GC_ASSERT(GC_all_nils != NULL);

    if (EXPECT(GC_n_heap_sects == GC_capacity_heap_sects, FALSE)) {
      /* Allocate new GC_heap_sects with sufficient capacity.   */
#     ifndef INITIAL_HEAP_SECTS
#       define INITIAL_HEAP_SECTS 32
#     endif
      size_t new_capacity = GC_n_heap_sects > 0 ?
                (size_t)GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
      void *new_heap_sects =
                GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));

      if (NULL == new_heap_sects) {
        /* Retry with smaller yet sufficient capacity.  */
        new_capacity = (size_t)GC_n_heap_sects + INITIAL_HEAP_SECTS;
        new_heap_sects =
                GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
        if (NULL == new_heap_sects)
          ABORT("Insufficient memory for heap sections");
      }
      old_capacity = GC_capacity_heap_sects;
      old_heap_sects = GC_heap_sects;
      /* Transfer GC_heap_sects contents to the newly allocated array.  */
      if (GC_n_heap_sects > 0)
        BCOPY(old_heap_sects, new_heap_sects,
              GC_n_heap_sects * sizeof(struct HeapSect));
      GC_capacity_heap_sects = new_capacity;
      GC_heap_sects = (struct HeapSect *)new_heap_sects;
      GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
                         (unsigned long)new_capacity);
    }

    while (EXPECT((word)p <= HBLKSIZE, FALSE)) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (EXPECT(endp <= (word)p, FALSE)) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (EXPECT(NULL == phdr, FALSE)) {
        /* This is extremely unlikely. Can't add it.  This will         */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
#   ifdef GC_ASSERTIONS
      /* Ensure no intersection between sections.       */
      for (i = 0; i < GC_n_heap_sects; i++) {
        word hs_start = (word)GC_heap_sects[i].hs_start;
        word hs_end = hs_start + GC_heap_sects[i].hs_bytes;

        GC_ASSERT(!((hs_start <= (word)p && (word)p < hs_end)
                    || (hs_start < endp && endp <= hs_end)
                    || ((word)p < hs_start && hs_end < endp)));
      }
#   endif
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;

    if ((word)p <= (word)GC_least_plausible_heap_addr
        || EXPECT(NULL == GC_least_plausible_heap_addr, FALSE)) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if (endp > (word)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }

    if (EXPECT(old_capacity > 0, FALSE)) {
#     ifndef GWW_VDB
        /* Recycling may call GC_add_to_heap() again but should not     */
        /* cause resizing of GC_heap_sects.                             */
        GC_scratch_recycle_no_gww(old_heap_sects,
                                  old_capacity * sizeof(struct HeapSect));
#     else
        /* TODO: implement GWW-aware recycling as in alloc_mark_stack */
        GC_noop1((word)old_heap_sects);
#     endif
    }
}

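/* Editorial note: the capacity policy in GC_add_to_heap() above doubles    */
/* the heap-sections array (starting from INITIAL_HEAP_SECTS) and falls     */
/* back to minimal linear growth when the doubled allocation fails.  The    */
/* sketch below illustrates that policy in isolation; it is not part of     */
/* alloc.c and next_heap_sects_capacity() is a hypothetical helper.         */
#if 0
# include <stddef.h>

  static size_t next_heap_sects_capacity(size_t n_used, int doubled_alloc_failed)
  {
    const size_t initial = 32;          /* mirrors INITIAL_HEAP_SECTS */

    if (doubled_alloc_failed)
      return n_used + initial;          /* smaller yet sufficient capacity */
    return n_used > 0 ? n_used * 2 : initial;
  }
#endif
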
#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
              (unsigned long)GC_heapsize /*, */
              COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %d from %p to %p %u/%lu blacklisted\n",
                i, (void *)start, (void *)&start[len],
                nbl, (unsigned long)divHBLKSZ(len));
    }
  }
#endif

void * GC_least_plausible_heap_addr = (void *)GC_WORD_MAX;
void * GC_greatest_plausible_heap_addr = 0;

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}

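/* Editorial note: a minimal usage sketch, not part of alloc.c.  It shows   */
/* how a client might cap the collector heap; 64 MiB is an arbitrary        */
/* example value.  GC_set_max_heap_size() and GC_INIT() are public API      */
/* declared in gc.h.                                                        */
#if 0
# include "gc.h"

  int main(void)
  {
    GC_set_max_heap_size(64UL << 20);   /* never grow the GC heap past 64 MiB */
    GC_INIT();
    /* Allocate with GC_MALLOC() as usual; requests that cannot be          */
    /* satisfied within the limit eventually return NULL (or go through     */
    /* the out-of-memory handler) instead of expanding the heap further.    */
    return 0;
  }
#endif
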
word GC_max_retries = 0;

GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
{
  size_t page_offset;
  size_t displ = 0;
  size_t recycled_bytes;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == ptr) return;

  GC_ASSERT(bytes != 0);
  GC_ASSERT(GC_page_size != 0);
  /* TODO: Assert correct memory flags if GWW_VDB */
  page_offset = (word)ptr & (GC_page_size - 1);
  if (page_offset != 0)
    displ = GC_page_size - page_offset;
  recycled_bytes = bytes > displ ? (bytes - displ) & ~(GC_page_size - 1) : 0;
  GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
                (unsigned long)recycled_bytes, (unsigned long)bytes, ptr);
  if (recycled_bytes > 0)
    GC_add_to_heap((struct hblk *)((word)ptr + displ), recycled_bytes);
}

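/* Editorial note: an isolated sketch, not part of alloc.c, of the          */
/* page-trimming arithmetic used in GC_scratch_recycle_inner() above: the   */
/* start address is rounded up to a page boundary and the remaining length  */
/* is rounded down to a whole number of pages, assuming page_size is a      */
/* power of two (as GC_page_size is).                                       */
#if 0
# include <stddef.h>
# include <stdint.h>

  /* Returns the number of recyclable bytes and stores the aligned start. */
  static size_t trim_to_whole_pages(uintptr_t start, size_t len,
                                    size_t page_size, uintptr_t *aligned_start)
  {
    size_t page_offset = start & (page_size - 1);
    size_t displ = page_offset != 0 ? page_size - page_offset : 0;

    *aligned_start = start + displ;
    return len > displ ? (len - displ) & ~(page_size - 1) : 0;
  }
#endif
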
/* This explicitly increases the size of the heap.  It is used          */
/* internally, but may also be invoked from GC_expand_hp by the user.   */
/* The argument is in units of HBLKSIZE (zero is treated as 1).         */
/* Returns FALSE on failure.                                            */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    size_t bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect   */
                                /* the heap to expand soon.             */

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_page_size != 0);
    if (0 == n) n = 1;
    bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
    GC_DBGLOG_PRINT_HEAP_IN_USE();
    if (GC_max_heapsize != 0
        && (GC_max_heapsize < (word)bytes
            || GC_heapsize > GC_max_heapsize - (word)bytes)) {
        /* Exceeded self-imposed limit */
        return FALSE;
    }
    space = GET_MEM(bytes);
    if (EXPECT(NULL == space, FALSE)) {
        WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", bytes >> 10);
        return FALSE;
    }
    GC_add_to_our_memory((ptr_t)space, bytes);
    GC_last_heap_growth_gc_no = GC_gc_no;
    GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                      TO_KiB_UL(GC_heapsize + bytes),
                      (unsigned long)GC_bytes_allocd);

    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0
            && (word)GC_last_heap_addr < (word)space)) {
        /* Assume the heap is growing up. */
        word new_limit = (word)space + (word)bytes + expansion_slop;
        if (new_limit > (word)space
            && (word)GC_greatest_plausible_heap_addr < new_limit)
          GC_greatest_plausible_heap_addr = (void *)new_limit;
    } else {
        /* Heap is growing down. */
        word new_limit = (word)space - expansion_slop;
        if (new_limit < (word)space
            && (word)GC_least_plausible_heap_addr > new_limit)
          GC_least_plausible_heap_addr = (void *)new_limit;
    }
    GC_last_heap_addr = (ptr_t)space;

    GC_add_to_heap(space, bytes);
    if (GC_on_heap_resize)
        (*GC_on_heap_resize)(GC_heapsize);

    return TRUE;
}

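/* Editorial note: a small sketch, not part of alloc.c, of the request-size */
/* computation at the top of GC_expand_hp_inner(): n heap blocks are scaled */
/* to bytes and rounded up to a page multiple.  HBLKSIZE is typically 4096; */
/* the round-up only matters on configurations where the OS page size       */
/* exceeds HBLKSIZE.  The helper name and parameters are illustrative.      */
#if 0
# include <stddef.h>

  static size_t expansion_bytes(size_t n_blocks, size_t hblk_size,
                                size_t page_size)
  {
    if (0 == n_blocks) n_blocks = 1;    /* zero is treated as 1 */
    /* Round up to a multiple of page_size (assumed a power of two). */
    return (n_blocks * hblk_size + page_size - 1) & ~(page_size - 1);
  }
#endif
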
/* Really returns a bool, but it's externally visible, so that's clumsy. */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    word n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
    word old_heapsize;
    GC_bool result;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    old_heapsize = GC_heapsize;
    result = GC_expand_hp_inner(n_blocks);
    if (result) {
      GC_requested_heapsize += bytes;
      if (GC_dont_gc) {
        /* Do not call WARN if the heap growth is intentional.  */
        GC_ASSERT(GC_heapsize >= old_heapsize);
        GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
      }
    }
    UNLOCK();
    return (int)result;
}

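/* Editorial note: a usage sketch, not part of alloc.c.  GC_expand_hp() is  */
/* public API declared in gc.h; it takes a size in bytes, converted to heap */
/* blocks above, and returns nonzero on success.  Pre-expanding the heap    */
/* this way can avoid repeated small expansions during a known allocation   */
/* burst; 8 MiB is an arbitrary example value.                              */
#if 0
# include "gc.h"

  int main(void)
  {
    GC_INIT();
    if (!GC_expand_hp(8UL << 20)) {     /* ask for roughly 8 MiB up front */
      /* Expansion failed (e.g. a limit set via GC_set_max_heap_size()     */
      /* would be exceeded); allocation can still proceed and grow the     */
      /* heap on demand.                                                   */
    }
    return 0;
  }
#endif
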
GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* The minimum value of the ratio of allocated bytes since the latest   */
/* GC to the number of finalizers created since that GC which triggers  */
/* a collection instead of heap expansion.  Has no effect in the        */
/* incremental mode.                                                    */
#if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
  STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
#else
  STATIC word GC_allocd_bytes_per_finalizer = 10000;
#endif

GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}

GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}

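/* Editorial note: a usage sketch, not part of alloc.c.  Both calls are     */
/* public API from gc.h; the value 5000 is only an example.  In             */
/* GC_collect_or_expand() below, a collection (rather than heap growth) is  */
/* preferred when the bytes allocated per newly registered finalizer drop   */
/* below this threshold, so a larger value makes that trigger fire more     */
/* readily.                                                                 */
#if 0
# include "gc.h"
# include <stdio.h>

  int main(void)
  {
    GC_INIT();
    GC_set_allocd_bytes_per_finalizer(5000);
    printf("bytes per finalizer: %lu\n",
           (unsigned long)GC_get_allocd_bytes_per_finalizer());
    return 0;
  }
#endif
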
static word last_fo_entries = 0;
static word last_bytes_finalized = 0;

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.  The flags argument */
/* should be IGNORE_OFF_PAGE or 0.                                      */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      unsigned flags,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_is_initialized);
    DISABLE_CANCEL(cancel_state);
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0)
         || (GC_fo_entries > last_fo_entries
             && (last_bytes_finalized | GC_bytes_finalized) != 0
             && (GC_fo_entries - last_fo_entries)
                * GC_allocd_bytes_per_finalizer > GC_bytes_allocd)
         || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        last_fo_entries = GC_fo_entries;
        last_bytes_finalized = GC_bytes_finalized;
        RESTORE_CANCEL(cancel_state);
        return TRUE;
      }
    }

    blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                        / (HBLKSIZE * GC_free_space_divisor)
                    + needed_blocks;
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if ((flags & IGNORE_OFF_PAGE) != 0) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
      if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
        blocks_to_get = divHBLKSZ(GC_WORD_MAX);
    } else if (blocks_to_get < MINHINCR) {
      blocks_to_get = MINHINCR;
    }

    if (GC_max_heapsize > GC_heapsize) {
      word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
      if (blocks_to_get > max_get_blocks)
        blocks_to_get = max_get_blocks > needed_blocks
                        ? max_get_blocks : needed_blocks;
    }

#   ifdef USE_MUNMAP
      if (GC_unmap_threshold > 1) {
        /* Return as much memory to the OS as possible before   */
        /* trying to get memory from it.                        */
        GC_unmap_old(0);
      }
#   endif
    if (!GC_expand_hp_inner(blocks_to_get)
        && (blocks_to_get == needed_blocks
            || !GC_expand_hp_inner(needed_blocks))) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning).     */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory!  Trying to continue...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
#         ifdef USE_MUNMAP
            GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#         endif
          WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
               " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return FALSE;
      }
    } else if (GC_fail_count) {
      GC_COND_LOG_PRINTF("Memory available again...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return TRUE;
}

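/* Editorial note: an illustration, not part of alloc.c, of the calling     */
/* protocol documented above GC_collect_or_expand(): the first call passes  */
/* retry = FALSE, and the caller keeps retrying with retry = TRUE until     */
/* space appears or FALSE is returned.  GC_allocobj() below follows this    */
/* pattern; have_enough_blocks() is a hypothetical stand-in for the         */
/* caller's own success check, and the internal GC_bool/word types are      */
/* assumed from gc_priv.h.                                                  */
#if 0
  static GC_bool get_blocks_with_retries(word needed_blocks, unsigned flags)
  {
    GC_bool retry = FALSE;

    while (!have_enough_blocks(needed_blocks)) {
      if (!GC_collect_or_expand(needed_blocks, flags, retry))
        return FALSE;           /* give up: collection and expansion failed */
      retry = TRUE;             /* mark later calls as repeated attempts */
    }
    return TRUE;
  }
#endif
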
/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &GC_obj_kinds[kind].ok_freelist[gran];
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_is_initialized);
    if (0 == gran) return NULL;

    while (NULL == *flh) {
      ENTER_GC();
#     ifndef GC_DISABLE_INCREMENTAL
        if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED) {
          /* True incremental mode, not just generational.      */
          /* Do our share of marking work.                      */
          GC_collect_a_little_inner(1);
        }
#     endif
      /* Sweep blocks for objects of this size */
        GC_ASSERT(!GC_is_full_gc
                  || NULL == GC_obj_kinds[kind].ok_reclaim_list
                  || NULL == GC_obj_kinds[kind].ok_reclaim_list[gran]);
        GC_continue_reclaim(gran, kind);
      EXIT_GC();
#     if defined(CPPCHECK)
        GC_noop1((word)&flh);
#     endif
      if (NULL == *flh) {
        GC_new_hblk(gran, kind);
#       if defined(CPPCHECK)
          GC_noop1((word)&flh);
#       endif
        if (NULL == *flh) {
          ENTER_GC();
          if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
              && !tried_minor) {
            GC_collect_a_little_inner(1);
            tried_minor = TRUE;
          } else {
            if (!GC_collect_or_expand(1, 0 /* flags */, retry)) {
              EXIT_GC();
              return NULL;
            }
            retry = TRUE;
          }
          EXIT_GC();
        }
      }
    }
    /* Successful allocation; reset failure count.      */
    GC_fail_count = 0;

    return (ptr_t)(*flh);
}
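
/* Editorial note: a client-side sketch, not part of alloc.c.  Ordinary     */
/* small-object allocations through the public API end up refilling the     */
/* per-size free lists via GC_allocobj() above when a list runs dry.        */
/* GC_MALLOC() and GC_INIT() are public macros from gc.h; the object size   */
/* and loop count are arbitrary example values.                             */
#if 0
# include "gc.h"
# include <assert.h>

  int main(void)
  {
    int i;

    GC_INIT();
    for (i = 0; i < 100000; ++i) {
      void *p = GC_MALLOC(24);          /* small object; may trigger a refill */
      assert(p != NULL);
    }
    return 0;
  }
#endif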