bdwgc / bdwgc, build 2045 (push, travis-ci)

13 Feb 2026 07:15AM UTC, coverage: 72.708% (-0.2%) from 72.89%

ivmai: Explicitly define type of CORD_EMPTY to CORD

* include/gc/cord.h (CORD_EMPTY): Cast 0 to `CORD`.

6503 of 8944 relevant lines covered (72.71%)

13786650.26 hits per line

Source File
/alloc.c (80.37% covered)
1
/*
2
 * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
6
 * Copyright (c) 2008-2025 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 */
17

18
#include "private/gc_priv.h"
19

20
/*
21
 * Separate free lists are maintained for different sized objects up
22
 * to `MAXOBJBYTES`.
23
 * The call `GC_allocobj(lg, kind)` ensures that the free list for the given
24
 * kind of objects of the given size (in granules) is nonempty.
25
 * It returns a pointer to the first entry on the free list.
26
 * In a single-threaded world, `GC_allocobj` may be called to allocate
27
 * an object of small size `lb` (and `NORMAL` kind) as follows
28
 * (`GC_generic_malloc_inner` is a wrapper over `GC_allocobj` which also
29
 * fills in `GC_size_map` if needed):
30
 *
31
 * ```
32
 *   lg = GC_size_map[lb];
33
 *   op = GC_objfreelist[lg];
34
 *   if (NULL == op) {
35
 *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
36
 *   } else {
37
 *     GC_objfreelist[lg] = obj_link(op);
38
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
39
 *   }
40
 * ```
41
 *
42
 * Note that this is very fast if the free list is not empty; it should
43
 * only involve the execution of 4 or 5 simple instructions.
44
 * All composite objects on freelists are cleared, except for
45
 * their first "pointer-sized" word.
46
 */
47

48
/*
49
 * The allocator uses `GC_allochblk` to allocate large chunks of objects.
50
 * These chunks all start on addresses that are multiples of `HBLKSIZE`.
51
 * Each allocated chunk has an associated header, which can be located
52
 * quickly based on the address of the chunk.  This makes it possible
53
 * to check quickly whether an arbitrary address corresponds to an object
54
 * administered by the allocator.  (See `headers.c` file for details.)
55
 */
56

57
/* Number of bytes not intended to be collected. */
58
word GC_non_gc_bytes = 0;
59

60
word GC_gc_no = 0;
61

62
#ifndef NO_CLOCK
63

64
GC_API void GC_CALL
65
GC_start_performance_measurement(void)
2✔
66
{
67
  GC_measure_performance = TRUE;
2✔
68
}
2✔
69

70
GC_API unsigned long GC_CALL
71
GC_get_full_gc_total_time(void)
2✔
72
{
73
  return GC_full_gc_total_time;
2✔
74
}
75

76
GC_API unsigned long GC_CALL
77
GC_get_stopped_mark_total_time(void)
2✔
78
{
79
  return GC_stopped_mark_total_time;
2✔
80
}
81

82
#  ifndef MAX_TOTAL_TIME_DIVISOR
83
/*
84
 * Do not use big values here, so that "outdated" delay time values have
85
 * less impact on the "average" delay time than newer ones.
86
 */
87
#    define MAX_TOTAL_TIME_DIVISOR 1000
88
#  endif
89

90
GC_API unsigned long GC_CALL
91
GC_get_avg_stopped_mark_time_ns(void)
2✔
92
{
93
  unsigned long total_time;
94
  unsigned divisor;
95

96
  READER_LOCK();
2✔
97
  total_time = (unsigned long)GC_world_stopped_total_time;
2✔
98
  divisor = GC_world_stopped_total_divisor;
2✔
99
  READER_UNLOCK();
2✔
100
  if (0 == divisor) {
2✔
101
    GC_ASSERT(0 == total_time);
×
102
    /*
103
     * No world-stopped collection has occurred since the start of
104
     * performance measurements.
105
     */
106
    return 0;
×
107
  }
108

109
  /* Halve values to prevent overflow during the multiplication. */
110
  for (; total_time > ~0UL / (1000UL * 1000); total_time >>= 1) {
2✔
111
    divisor >>= 1;
×
112
    if (UNLIKELY(0 == divisor)) {
×
113
      /* The actual result is larger than representable value. */
114
      return ~0UL;
×
115
    }
116
  }
117

118
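  /*
   * Convert the accumulated stop time from milliseconds to nanoseconds
   * and average it over the number of recorded world stops (the divisor).
   */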
  return total_time * (1000UL * 1000) / divisor;
2✔
119
}
120

121
#endif /* !NO_CLOCK */
122

123
GC_API int GC_CALL
124
GC_is_incremental_mode(void)
16✔
125
{
126
  return (int)GC_incremental;
16✔
127
}
128

129
#ifdef THREADS
130
int GC_parallel = FALSE; /*< parallel collection is off by default */
131
#endif
132

133
#if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
134
int GC_full_freq = GC_FULL_FREQ;
135
#else
136
/*
137
 * Every `GC_full_freq + 1` collection is a full collection, whether we
138
 * need it or not.
139
 */
140
int GC_full_freq = 19;
141
#endif
142

143
#ifdef THREAD_LOCAL_ALLOC
144
GC_INNER GC_bool GC_world_stopped = FALSE;
145
#endif
146

147
STATIC GC_bool GC_disable_automatic_collection = FALSE;
148

149
GC_API void GC_CALL
150
GC_set_disable_automatic_collection(int value)
2✔
151
{
152
  LOCK();
2✔
153
  GC_disable_automatic_collection = (GC_bool)value;
2✔
154
  UNLOCK();
2✔
155
}
2✔
156

157
GC_API int GC_CALL
158
GC_get_disable_automatic_collection(void)
2✔
159
{
160
  int value;
161

162
  READER_LOCK();
2✔
163
  value = (int)GC_disable_automatic_collection;
2✔
164
  READER_UNLOCK();
2✔
165
  return value;
2✔
166
}
167

168
/*
169
 * The version macros are now defined in `gc_version.h` file, which is
170
 * included by `gc.h` file, which, in turn, is included by `gc_priv.h` file.
171
 */
172
#ifndef GC_NO_VERSION_VAR
173
EXTERN_C_BEGIN
174
extern const GC_VERSION_VAL_T GC_version;
175
EXTERN_C_END
176

177
const GC_VERSION_VAL_T GC_version = ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16)
178
                                    | (GC_VERSION_MINOR << 8)
179
                                    | GC_VERSION_MICRO;
180
#endif
181

182
GC_API GC_VERSION_VAL_T GC_CALL
183
GC_get_version(void)
2✔
184
{
185
  return ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8)
2✔
186
         | GC_VERSION_MICRO;
187
}
188

189
GC_API int GC_CALL
190
GC_get_dont_add_byte_at_end(void)
42✔
191
{
192
#if MAX_EXTRA_BYTES > 0
193
  /* This is meaningful only if `GC_all_interior_pointers`. */
194
  return 0;
42✔
195
#else
196
  return 1;
197
#endif
198
}
199

200
/* Some more variables. */
201

202
#ifdef GC_DONT_EXPAND
203
int GC_dont_expand = TRUE;
204
#else
205
int GC_dont_expand = FALSE;
206
#endif
207

208
#if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
209
word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /*< must be positive */
210
#else
211
word GC_free_space_divisor = 3;
212
#endif
213

214
GC_INNER int GC_CALLBACK
215
GC_never_stop_func(void)
7,432,231✔
216
{
217
  return FALSE;
7,432,231✔
218
}
219

220
#if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
221
/*
222
 * We try to keep pause times from exceeding this by much.
223
 * In milliseconds.
224
 */
225
unsigned long GC_time_limit = GC_TIME_LIMIT;
226
#elif defined(PARALLEL_MARK)
227
/*
228
 * The parallel marker cannot be interrupted for now, so the time limit
229
 * is absent by default.
230
 */
231
unsigned long GC_time_limit = GC_TIME_UNLIMITED;
232
#else
233
unsigned long GC_time_limit = 15;
234
#endif
235

236
#ifndef NO_CLOCK
237
/*
238
 * The nanoseconds add-on to `GC_time_limit` value.
239
 * Not updated by `GC_set_time_limit()`.
240
 * Ignored if the value of `GC_time_limit` is `GC_TIME_UNLIMITED`.
241
 */
242
STATIC unsigned long GC_time_lim_nsec = 0;
243

244
#  define TV_NSEC_LIMIT (1000UL * 1000) /*< amount of nanoseconds in 1 ms */
245

246
GC_API void GC_CALL
247
GC_set_time_limit_tv(struct GC_timeval_s tv)
2✔
248
{
249
  GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
2✔
250
  GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
2✔
251
  GC_time_limit = tv.tv_ms;
2✔
252
  GC_time_lim_nsec = tv.tv_nsec;
2✔
253
}
2✔
254

255
GC_API struct GC_timeval_s GC_CALL
256
GC_get_time_limit_tv(void)
2✔
257
{
258
  struct GC_timeval_s tv;
259

260
  tv.tv_ms = GC_time_limit;
2✔
261
  tv.tv_nsec = GC_time_lim_nsec;
2✔
262
  return tv;
2✔
263
}
264
#endif /* !NO_CLOCK */
265

266
/* Note: accessed holding the allocator lock. */
267
STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
268

269
GC_API void GC_CALL
270
GC_set_stop_func(GC_stop_func stop_func)
2✔
271
{
272
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
2✔
273
  LOCK();
2✔
274
  GC_default_stop_func = stop_func;
2✔
275
  UNLOCK();
2✔
276
}
2✔
277

278
GC_API GC_stop_func GC_CALL
279
GC_get_stop_func(void)
2✔
280
{
281
  GC_stop_func stop_func;
282

283
  READER_LOCK();
2✔
284
  stop_func = GC_default_stop_func;
2✔
285
  READER_UNLOCK();
2✔
286
  return stop_func;
2✔
287
}
288

289
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
290
#  define GC_timeout_stop_func GC_default_stop_func
291
#else
292
STATIC int GC_CALLBACK
293
GC_timeout_stop_func(void)
6,402,133✔
294
{
295
  CLOCK_TYPE current_time;
296
  static unsigned count = 0;
297
  unsigned long time_diff, nsec_diff;
298

299
  GC_ASSERT(I_HOLD_LOCK());
6,402,133✔
300
  if (GC_default_stop_func())
6,402,133✔
301
    return TRUE;
6,402,133✔
302

303
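  /* Check the clock only on every 4th call to limit GET_TIME overhead. */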
  if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
6,402,133✔
304
    return FALSE;
6,402,133✔
305

306
  GET_TIME(current_time);
×
307
  time_diff = MS_TIME_DIFF(current_time, GC_start_time);
×
308
  nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
×
309
#  if defined(CPPCHECK)
310
  GC_noop1_ptr(&nsec_diff);
311
#  endif
312
  if (time_diff >= GC_time_limit
×
313
      && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
×
314
    GC_COND_LOG_PRINTF(
×
315
        "Abandoning stopped marking after %lu ms %lu ns (attempt %d)\n",
316
        time_diff, nsec_diff, GC_n_attempts);
317
    return TRUE;
×
318
  }
319

320
  return FALSE;
×
321
}
322
#endif /* !GC_DISABLE_INCREMENTAL */
323

324
#ifdef THREADS
325
GC_INNER word GC_total_stacksize = 0;
326
#endif
327

328
/* The lowest value returned by `min_bytes_allocd()`. */
329
static size_t min_bytes_allocd_minimum = 1;
330

331
GC_API void GC_CALL
332
GC_set_min_bytes_allocd(size_t value)
2✔
333
{
334
  GC_ASSERT(value > 0);
2✔
335
  min_bytes_allocd_minimum = value;
2✔
336
}
2✔
337

338
GC_API size_t GC_CALL
339
GC_get_min_bytes_allocd(void)
2✔
340
{
341
  return min_bytes_allocd_minimum;
2✔
342
}
343

344
/*
345
 * Return the minimum number of bytes that must be allocated between
346
 * collections to amortize the cost of the latter.  Should be nonzero.
347
 */
348
static word
349
min_bytes_allocd(void)
39,224✔
350
{
351
  word result;
352
  word stack_size;
353
  /*
354
   * Total size of roots; it includes the stack size twice, since the stack
355
   * is expensive to scan.
356
   */
357
  word total_root_size;
358
  /* Estimate of memory to be scanned during normal collection. */
359
  word scan_size;
360

361
  GC_ASSERT(I_HOLD_LOCK());
39,224✔
362
#ifdef THREADS
363
  if (GC_has_running_threads()) {
39,224✔
364
    /* We are multi-threaded... */
365
    stack_size = GC_total_stacksize;
14,817✔
366
    /*
367
     * For now, we just use the value computed during the latest garbage
368
     * collection.
369
     */
370
#  ifdef DEBUG_THREADS
371
    GC_log_printf("Total stacks size: %lu\n", (unsigned long)stack_size);
372
#  endif
373
  } else
374
#endif
375
  /* else */ {
376
#ifdef STACK_NOT_SCANNED
377
    stack_size = 0;
378
#elif defined(STACK_GROWS_UP)
379
    stack_size = (word)(GC_approx_sp() - GC_stackbottom);
380
#else
381
    stack_size = (word)(GC_stackbottom - GC_approx_sp());
24,407✔
382
#endif
383
  }
384

385
  total_root_size = 2 * stack_size + GC_root_size;
39,224✔
386
  scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4 + total_root_size;
39,224✔
387
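  /*
   * Require roughly `scan_size / GC_free_space_divisor` bytes of
   * allocation between collections to amortize the scanning cost.
   */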
  result = scan_size / GC_free_space_divisor;
39,224✔
388
  if (GC_incremental) {
39,224✔
389
    result /= 2;
30,543✔
390
  }
391
  return result > min_bytes_allocd_minimum ? result : min_bytes_allocd_minimum;
39,224✔
392
}
393

394
/*
395
 * Return the number of bytes allocated, adjusted for explicit storage
396
 * management, etc.  This number is used in deciding when to trigger
397
 * collections.
398
 */
399
STATIC word
400
GC_adj_bytes_allocd(void)
6,628,901✔
401
{
402
  GC_signed_word result;
403
  GC_signed_word expl_managed = (GC_signed_word)GC_non_gc_bytes
6,628,901✔
404
                                - (GC_signed_word)GC_non_gc_bytes_at_gc;
6,628,901✔
405

406
  /*
407
   * Do not count what was explicitly freed, or newly allocated for
408
   * explicit management.  Note that deallocating an explicitly managed
409
   * object should not alter result, assuming the client is playing by
410
   * the rules.
411
   */
412
  result = (GC_signed_word)GC_bytes_allocd + (GC_signed_word)GC_bytes_dropped
6,628,901✔
413
           - (GC_signed_word)GC_bytes_freed
6,628,901✔
414
           + (GC_signed_word)GC_finalizer_bytes_freed - expl_managed;
6,628,901✔
415
  if (result > (GC_signed_word)GC_bytes_allocd) {
6,628,901✔
416
    /* Probably a client bug or unfortunate scheduling. */
417
    result = (GC_signed_word)GC_bytes_allocd;
465,920✔
418
  }
419
  /*
420
   * We count objects enqueued for finalization as though they had been
421
   * reallocated this round.  Finalization is visible to user.
422
   * And if we do not count this, we have stability problems for programs
423
   * that finalize all objects.
424
   */
425
  result += (GC_signed_word)GC_bytes_finalized;
6,628,901✔
426
  if (result < (GC_signed_word)(GC_bytes_allocd >> 3)) {
6,628,901✔
427
    /*
428
     * Always count at least 1/8 of the allocations.  We do not want to
429
     * collect too infrequently, since that would inhibit coalescing of
430
     * free storage blocks.  This also makes us partially robust against
431
     * client bugs.
432
     */
433
    result = (GC_signed_word)(GC_bytes_allocd >> 3);
6,317✔
434
  }
435
  return (word)result;
6,628,901✔
436
}
437

438
/*
439
 * Clear up a few frames worth of garbage left at the top of the stack.
440
 * This is used to prevent us from accidentally treating garbage left
441
 * on the stack by other parts of the collector as roots.
442
 * This differs from the code in `misc.c` file, which actually tries
443
 * to keep the stack clear of long-lived, client-generated garbage.
444
 */
445
STATIC void
446
GC_clear_a_few_frames(void)
25,469✔
447
{
448
#ifndef CLEAR_STACK_NPTRS
449
#  define CLEAR_STACK_NPTRS 64 /*< pointers */
450
#endif
451
  volatile ptr_t frames[CLEAR_STACK_NPTRS];
452

453
  BZERO(CAST_AWAY_VOLATILE_PVOID(frames), sizeof(frames));
25,469✔
454
}
25,469✔
455

456
GC_API void GC_CALL
457
GC_start_incremental_collection(void)
×
458
{
459
#ifndef GC_DISABLE_INCREMENTAL
460
  LOCK();
×
461
  if (GC_incremental) {
×
462
    GC_should_start_incremental_collection = TRUE;
×
463
    if (!GC_dont_gc) {
×
464
      GC_collect_a_little_inner(1);
×
465
    }
466
  }
467
  UNLOCK();
×
468
#endif
469
}
×
470

471
GC_INNER GC_bool
472
GC_should_collect(void)
6,633,047✔
473
{
474
  static word last_min_bytes_allocd, last_gc_no;
475

476
  GC_ASSERT(I_HOLD_LOCK());
6,633,047✔
477
  if (last_gc_no != GC_gc_no) {
6,633,047✔
478
    last_min_bytes_allocd = min_bytes_allocd();
25,097✔
479
    last_gc_no = GC_gc_no;
25,097✔
480
  }
481
#ifndef GC_DISABLE_INCREMENTAL
482
  if (GC_should_start_incremental_collection) {
6,633,047✔
483
    GC_should_start_incremental_collection = FALSE;
×
484
    return TRUE;
×
485
  }
486
#endif
487
  if (GC_disable_automatic_collection)
6,633,047✔
488
    return FALSE;
×
489

490
  if (GC_last_heap_growth_gc_no == GC_gc_no) {
6,633,047✔
491
    /* Avoid expanding past limits used by black-listing. */
492
    return TRUE;
4,146✔
493
  }
494

495
  return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
6,628,901✔
496
}
497

498
/*
499
 * Called at the start of full collections; not called if set to zero.
500
 * Called with the allocator lock held.  Not used by the collector itself.
501
 */
502
/* `STATIC` */ GC_start_callback_proc GC_start_call_back = 0;
503

504
GC_API void GC_CALL
505
GC_set_start_callback(GC_start_callback_proc fn)
2✔
506
{
507
  LOCK();
2✔
508
  GC_start_call_back = fn;
2✔
509
  UNLOCK();
2✔
510
}
2✔
511

512
GC_API GC_start_callback_proc GC_CALL
513
GC_get_start_callback(void)
2✔
514
{
515
  GC_start_callback_proc fn;
516

517
  READER_LOCK();
2✔
518
  fn = GC_start_call_back;
2✔
519
  READER_UNLOCK();
2✔
520
  return fn;
2✔
521
}
522

523
GC_INLINE void
524
GC_notify_full_gc(void)
11,968✔
525
{
526
  if (GC_start_call_back != 0) {
11,968✔
527
    (*GC_start_call_back)();
×
528
  }
529
}
11,968✔
530

531
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
532
STATIC void GC_finish_collection(void);
533

534
/*
535
 * Initiate a garbage collection if appropriate.  Choose judiciously
536
 * between partial, full, and stop-world collections.
537
 */
538
STATIC void
539
GC_maybe_gc(void)
6,146,067✔
540
{
541
  GC_ASSERT(I_HOLD_LOCK());
6,146,067✔
542
  ASSERT_CANCEL_DISABLED();
6,146,067✔
543
  if (!GC_should_collect())
6,146,067✔
544
    return;
6,131,595✔
545

546
  if (!GC_incremental) {
14,472✔
547
    GC_gcollect_inner();
×
548
    return;
×
549
  }
550

551
  GC_ASSERT(!GC_collection_in_progress());
14,472✔
552
#ifdef PARALLEL_MARK
553
  if (GC_parallel)
14,472✔
554
    GC_wait_for_reclaim();
6,473✔
555
#endif
556
  if (GC_need_full_gc || GC_n_partial_gcs >= GC_full_freq) {
14,472✔
557
    GC_COND_LOG_PRINTF(
971✔
558
        "***>Full mark for collection #%lu after %lu bytes allocated\n",
559
        (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
560
    GC_notify_full_gc();
971✔
561
    ENTER_GC();
971✔
562
    GC_promote_black_lists();
971✔
563
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
971✔
564
    GC_clear_marks();
971✔
565
    EXIT_GC();
971✔
566
    GC_n_partial_gcs = 0;
971✔
567
    GC_is_full_gc = TRUE;
971✔
568
  } else {
569
    GC_n_partial_gcs++;
13,501✔
570
  }
571

572
  /*
573
   * Try to mark with the world stopped.  If we run out of time, then this
574
   * turns into an incremental marking.
575
   */
576
#ifndef NO_CLOCK
577
  if (GC_time_limit != GC_TIME_UNLIMITED)
14,472✔
578
    GET_TIME(GC_start_time);
×
579
#endif
580
  if (GC_stopped_mark(GC_timeout_stop_func)) {
14,472✔
581
    SAVE_CALLERS_TO_LAST_STACK();
582
    GC_finish_collection();
14,472✔
583
  } else if (!GC_is_full_gc) {
×
584
    /* Count this as the first attempt. */
585
    GC_n_attempts++;
×
586
  }
587
}
588

589
STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
590

591
GC_API void GC_CALL
592
GC_set_on_collection_event(GC_on_collection_event_proc fn)
2✔
593
{
594
  /* `fn` may be 0 (means no event notifier). */
595
  LOCK();
2✔
596
  GC_on_collection_event = fn;
2✔
597
  UNLOCK();
2✔
598
}
2✔
599

600
GC_API GC_on_collection_event_proc GC_CALL
601
GC_get_on_collection_event(void)
2✔
602
{
603
  GC_on_collection_event_proc fn;
604

605
  READER_LOCK();
2✔
606
  fn = GC_on_collection_event;
2✔
607
  READER_UNLOCK();
2✔
608
  return fn;
2✔
609
}
610

611
GC_INNER GC_bool
612
GC_try_to_collect_inner(GC_stop_func stop_func)
11,039✔
613
{
614
#ifndef NO_CLOCK
615
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
11,039✔
616
  GC_bool start_time_valid;
617
#endif
618

619
  ASSERT_CANCEL_DISABLED();
11,039✔
620
  GC_ASSERT(I_HOLD_LOCK());
11,039✔
621
  GC_ASSERT(GC_is_initialized);
11,039✔
622
  if (GC_dont_gc || (*stop_func)())
11,039✔
623
    return FALSE;
11,039✔
624
  if (GC_on_collection_event)
10,997✔
625
    GC_on_collection_event(GC_EVENT_START);
×
626
  if (GC_incremental && GC_collection_in_progress()) {
10,997✔
627
    GC_COND_LOG_PRINTF(
×
628
        "GC_try_to_collect_inner: finishing collection in progress\n");
629
    /* Just finish collection already in progress. */
630
    do {
631
      if ((*stop_func)()) {
×
632
        /* TODO: Notify `GC_EVENT_ABANDON`. */
633
        return FALSE;
×
634
      }
635
      GC_collect_a_little_inner(1);
×
636
    } while (GC_collection_in_progress());
×
637
  }
638
  GC_notify_full_gc();
10,997✔
639
#ifndef NO_CLOCK
640
  start_time_valid = FALSE;
10,997✔
641
  if ((GC_print_stats | (int)GC_measure_performance) != 0) {
10,997✔
642
    if (GC_print_stats)
2,262✔
643
      GC_log_printf("Initiating full world-stop collection!\n");
×
644
    start_time_valid = TRUE;
2,262✔
645
    GET_TIME(start_time);
2,262✔
646
  }
647
#endif
648
  GC_promote_black_lists();
10,997✔
649
  /*
650
   * Make sure all blocks have been reclaimed, so sweep routines do not
651
   * see cleared mark bits.  If we are guaranteed to finish, then this
652
   * is unnecessary.  In the find-leak case, we have to finish to
653
   * guarantee that previously unmarked objects are not reported as leaks.
654
   */
655
#ifdef PARALLEL_MARK
656
  if (GC_parallel)
10,997✔
657
    GC_wait_for_reclaim();
4,005✔
658
#endif
659
  ENTER_GC();
10,997✔
660
  if ((GC_find_leak_inner || stop_func != GC_never_stop_func)
10,997✔
661
      && !GC_reclaim_all(stop_func, FALSE)) {
66✔
662
    /* Aborted.  So far everything is still consistent. */
663
    EXIT_GC();
×
664
    /* TODO: Notify `GC_EVENT_ABANDON`. */
665
    return FALSE;
×
666
  }
667
  GC_invalidate_mark_state(); /*< flush mark stack */
10,997✔
668
  GC_clear_marks();
10,997✔
669
  SAVE_CALLERS_TO_LAST_STACK();
670
  GC_is_full_gc = TRUE;
10,997✔
671
  EXIT_GC();
10,997✔
672
  if (!GC_stopped_mark(stop_func)) {
10,997✔
673
    if (!GC_incremental) {
×
674
      /*
675
       * We are partially done and have no way to complete or use
676
       * current work.  Reestablish invariants as cheaply as possible.
677
       */
678
      GC_invalidate_mark_state();
×
679
      GC_unpromote_black_lists();
×
680
    } else {
681
      /*
682
       * We claim the world is already (or still) consistent.
683
       * We will finish incrementally.
684
       */
685
    }
686
    /* TODO: Notify `GC_EVENT_ABANDON`. */
687
    return FALSE;
×
688
  }
689
  GC_finish_collection();
10,997✔
690
#ifndef NO_CLOCK
691
  if (start_time_valid) {
10,997✔
692
    CLOCK_TYPE current_time;
693
    unsigned long time_diff, ns_frac_diff;
694

695
    GET_TIME(current_time);
2,262✔
696
    time_diff = MS_TIME_DIFF(current_time, start_time);
2,262✔
697
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
2,262✔
698
    if (GC_measure_performance) {
2,262✔
699
      GC_full_gc_total_time += time_diff; /*< may wrap */
2,262✔
700
      GC_full_gc_total_ns_frac += (unsigned32)ns_frac_diff;
2,262✔
701
      if (GC_full_gc_total_ns_frac >= (unsigned32)1000000UL) {
2,262✔
702
        /* Overflow of the nanoseconds part. */
703
        GC_full_gc_total_ns_frac -= (unsigned32)1000000UL;
1,150✔
704
        GC_full_gc_total_time++;
1,150✔
705
      }
706
    }
707
    if (GC_print_stats)
2,262✔
708
      GC_log_printf("Complete collection took %lu ms %lu ns\n", time_diff,
×
709
                    ns_frac_diff);
710
  }
711
#endif
712
  if (GC_on_collection_event)
10,997✔
713
    GC_on_collection_event(GC_EVENT_END);
×
714
  return TRUE;
10,997✔
715
}
716

717
/* The default value of `GC_rate`. */
718
#ifndef GC_RATE
719
#  define GC_RATE 10
720
#endif
721

722
/*
723
 * When `GC_collect_a_little_inner()` performs `n_blocks` units of garbage
724
 * collection work, a unit is intended to touch roughly `GC_rate` pages.
725
 * (But, every once in a while, we do more than that.)  This needs to be
726
 * a fairly large number with our current incremental collection strategy,
727
 * since otherwise we allocate too much during garbage collection, and
728
 * the cleanup gets expensive.
729
 */
730
STATIC unsigned GC_rate = GC_RATE;
731

732
GC_API void GC_CALL
733
GC_set_rate(int value)
2✔
734
{
735
  GC_ASSERT(value > 0);
2✔
736
  GC_rate = (unsigned)value;
2✔
737
}
2✔
738

739
GC_API int GC_CALL
740
GC_get_rate(void)
2✔
741
{
742
  return (int)GC_rate;
2✔
743
}
744

745
/* The default maximum number of prior attempts at world stop marking. */
746
#ifndef MAX_PRIOR_ATTEMPTS
747
#  define MAX_PRIOR_ATTEMPTS 3
748
#endif
749

750
/*
751
 * The maximum number of prior attempts at world stop marking.
752
 * A value of 1 means that we finish the second time, no matter how long
753
 * it takes.  Does not count the initial root scan for a full collection.
754
 */
755
static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
756

757
GC_API void GC_CALL
758
GC_set_max_prior_attempts(int value)
2✔
759
{
760
  GC_ASSERT(value >= 0);
2✔
761
  max_prior_attempts = value;
2✔
762
}
2✔
763

764
GC_API int GC_CALL
765
GC_get_max_prior_attempts(void)
2✔
766
{
767
  return max_prior_attempts;
2✔
768
}
769

770
GC_INNER void
771
GC_collect_a_little_inner(size_t n_blocks)
6,150,694✔
772
{
773
  IF_CANCEL(int cancel_state;)
774

775
  GC_ASSERT(I_HOLD_LOCK());
6,150,694✔
776
  GC_ASSERT(GC_is_initialized);
6,150,694✔
777
  DISABLE_CANCEL(cancel_state);
6,150,694✔
778
  if (GC_incremental && GC_collection_in_progress()) {
6,150,694✔
779
    size_t i;
780
    size_t max_deficit = GC_rate * n_blocks;
×
781

782
    ENTER_GC();
×
783
#ifdef PARALLEL_MARK
784
    if (GC_time_limit != GC_TIME_UNLIMITED)
×
785
      GC_parallel_mark_disabled = TRUE;
×
786
#endif
787
    for (i = GC_mark_deficit; i < max_deficit; i++) {
×
788
      if (GC_mark_some(NULL))
×
789
        break;
×
790
    }
791
#ifdef PARALLEL_MARK
792
    GC_parallel_mark_disabled = FALSE;
×
793
#endif
794
    EXIT_GC();
×
795

796
    if (i < max_deficit && !GC_dont_gc) {
×
797
      GC_ASSERT(!GC_collection_in_progress());
×
798
      /* Need to follow up with a full collection. */
799
      SAVE_CALLERS_TO_LAST_STACK();
800
#ifdef PARALLEL_MARK
801
      if (GC_parallel)
×
802
        GC_wait_for_reclaim();
×
803
#endif
804
#ifndef NO_CLOCK
805
      if (GC_time_limit != GC_TIME_UNLIMITED
×
806
          && GC_n_attempts < max_prior_attempts)
×
807
        GET_TIME(GC_start_time);
×
808
#endif
809
      if (GC_stopped_mark(GC_n_attempts < max_prior_attempts
×
810
                              ? GC_timeout_stop_func
811
                              : GC_never_stop_func)) {
812
        GC_finish_collection();
×
813
      } else {
814
        GC_n_attempts++;
×
815
      }
816
    }
817
    if (GC_mark_deficit > 0) {
×
818
      GC_mark_deficit
819
          = GC_mark_deficit > max_deficit ? GC_mark_deficit - max_deficit : 0;
×
820
    }
821
  } else if (!GC_dont_gc) {
6,150,694✔
822
    GC_maybe_gc();
6,146,067✔
823
  }
824
  RESTORE_CANCEL(cancel_state);
6,150,694✔
825
}
6,150,694✔
826

827
#if !defined(NO_FIND_LEAK) || !defined(SHORT_DBG_HDRS)
828
GC_INNER void (*GC_check_heap)(void) = 0;
829
GC_INNER void (*GC_print_all_smashed)(void) = 0;
830
#endif
831

832
GC_API int GC_CALL
833
GC_collect_a_little(void)
2,381,127✔
834
{
835
  int result;
836

837
  if (UNLIKELY(!GC_is_initialized))
2,381,127✔
838
    GC_init();
×
839
  LOCK();
2,381,127✔
840
  /*
841
   * Note: if the collection is in progress, this may do marking (not
842
   * stopping the world) even in case of disabled garbage collection.
843
   */
844
  GC_collect_a_little_inner(1);
2,381,127✔
845
  result = (int)GC_collection_in_progress();
2,381,127✔
846
  UNLOCK();
2,381,127✔
847
  if (GC_debugging_started && !result)
2,381,127✔
848
    GC_print_all_smashed();
×
849
  return result;
2,381,127✔
850
}
851

852
#ifdef THREADS
853
GC_API void GC_CALL
854
GC_stop_world_external(void)
2✔
855
{
856
  GC_ASSERT(GC_is_initialized);
2✔
857
  LOCK();
2✔
858
#  ifdef THREAD_LOCAL_ALLOC
859
  GC_ASSERT(!GC_world_stopped);
2✔
860
#  endif
861
  ENTER_GC();
2✔
862
  STOP_WORLD();
2✔
863
#  ifdef THREAD_LOCAL_ALLOC
864
  GC_world_stopped = TRUE;
2✔
865
#  endif
866
}
2✔
867

868
GC_API void GC_CALL
869
GC_start_world_external(void)
2✔
870
{
871
#  ifdef THREAD_LOCAL_ALLOC
872
  GC_ASSERT(GC_world_stopped);
2✔
873
  GC_world_stopped = FALSE;
2✔
874
#  else
875
  GC_ASSERT(GC_is_initialized);
876
#  endif
877
  START_WORLD();
2✔
878
  EXIT_GC();
2✔
879
  UNLOCK();
2✔
880
}
2✔
881
#endif /* THREADS */
882

883
#ifdef USE_MUNMAP
884
#  ifndef MUNMAP_THRESHOLD
885
#    define MUNMAP_THRESHOLD 7
886
#  endif
887
GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;
888

889
#  define IF_USE_MUNMAP(x) x
890
#  define COMMA_IF_USE_MUNMAP(x) /* comma */ , x
891
#else
892
#  define IF_USE_MUNMAP(x)
893
#  define COMMA_IF_USE_MUNMAP(x)
894
#endif /* !USE_MUNMAP */
895

896
/*
897
 * We stop the world and mark from all roots.  If `stop_func()` ever
898
 * returns `TRUE`, we may fail and return `FALSE`.  Increment `GC_gc_no`
899
 * if we succeed.
900
 */
901
STATIC GC_bool
902
GC_stopped_mark(GC_stop_func stop_func)
25,469✔
903
{
904
  ptr_t cold_gc_frame = GC_approx_sp();
25,469✔
905
  unsigned abandoned_at;
906
#ifndef NO_CLOCK
907
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
25,469✔
908
  GC_bool start_time_valid = FALSE;
25,469✔
909
#endif
910

911
  GC_ASSERT(I_HOLD_LOCK());
25,469✔
912
  GC_ASSERT(GC_is_initialized);
25,469✔
913
  ENTER_GC();
25,469✔
914
#if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
915
  GC_add_current_malloc_heap();
916
#endif
917
#if defined(REGISTER_LIBRARIES_EARLY)
918
  GC_cond_register_dynamic_libraries();
25,469✔
919
#endif
920

921
#if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
922
  GC_process_togglerefs();
25,469✔
923
#endif
924

925
  /* Output blank line for convenience here. */
926
  GC_COND_LOG_PRINTF(
25,469✔
927
      "\n--> Marking for collection #%lu after %lu allocated bytes\n",
928
      (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
929
#ifndef NO_CLOCK
930
  if (GC_PRINT_STATS_FLAG || GC_measure_performance) {
25,469✔
931
    GET_TIME(start_time);
8,608✔
932
    start_time_valid = TRUE;
8,608✔
933
  }
934
#endif
935
#ifdef THREADS
936
  if (GC_on_collection_event)
25,469✔
937
    GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
×
938
#endif
939
  STOP_WORLD();
25,469✔
940
#ifdef THREADS
941
  if (GC_on_collection_event)
25,469✔
942
    GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
×
943
#  ifdef THREAD_LOCAL_ALLOC
944
  GC_world_stopped = TRUE;
25,469✔
945
#  elif defined(CPPCHECK)
946
  /* Workaround a warning about adjacent same `if` condition. */
947
  (void)0;
948
#  endif
949
#endif
950

951
#ifdef MAKE_BACK_GRAPH
952
  if (GC_print_back_height) {
953
    GC_build_back_graph();
954
  }
955
#endif
956

957
  /* Notify about marking from all roots. */
958
  if (GC_on_collection_event)
25,469✔
959
    GC_on_collection_event(GC_EVENT_MARK_START);
×
960

961
  /* Minimize junk left in my registers and on the stack. */
962
  GC_clear_a_few_frames();
25,469✔
963
  GC_noop6(0, 0, 0, 0, 0, 0);
25,469✔
964

965
  GC_initiate_gc();
25,469✔
966
#ifdef PARALLEL_MARK
967
  if (stop_func != GC_never_stop_func)
25,469✔
968
    GC_parallel_mark_disabled = TRUE;
14,472✔
969
#endif
970
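  /*
   * Mark until done or until `stop_func()` requests abandoning the
   * collection; `abandoned_at` is reset to zero below on completion.
   */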
  for (abandoned_at = 1; !(*stop_func)(); abandoned_at++) {
7,420,557✔
971
    if (GC_mark_some(cold_gc_frame)) {
7,420,557✔
972
#ifdef PARALLEL_MARK
973
      if (GC_parallel && GC_parallel_mark_disabled) {
25,469✔
974
        GC_COND_LOG_PRINTF("Stopped marking done after %u iterations"
6,473✔
975
                           " with disabled parallel marker\n",
976
                           abandoned_at - 1);
977
      }
978
#endif
979
      abandoned_at = 0;
25,469✔
980
      break;
25,469✔
981
    }
982
  }
983
#ifdef PARALLEL_MARK
984
  GC_parallel_mark_disabled = FALSE;
25,469✔
985
#endif
986

987
  if (abandoned_at > 0) {
25,469✔
988
    /* Give the mutator a chance. */
989
    GC_mark_deficit = abandoned_at - 1;
×
990
    /* TODO: Notify `GC_EVENT_MARK_ABANDON`. */
991
  } else {
992
    GC_gc_no++;
25,469✔
993
    /* Check all debugged objects for consistency. */
994
    if (GC_debugging_started)
25,469✔
995
      GC_check_heap();
252✔
996
    if (GC_on_collection_event)
25,469✔
997
      GC_on_collection_event(GC_EVENT_MARK_END);
×
998
  }
999

1000
#ifdef THREADS
1001
  if (GC_on_collection_event)
25,469✔
1002
    GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
1003
#endif
1004
#ifdef THREAD_LOCAL_ALLOC
1005
  GC_world_stopped = FALSE;
25,469✔
1006
#endif
1007
  START_WORLD();
25,469✔
1008
#ifdef THREADS
1009
  if (GC_on_collection_event)
25,469✔
1010
    GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
1011
#endif
1012

1013
#ifndef NO_CLOCK
1014
  if (start_time_valid) {
25,469✔
1015
    CLOCK_TYPE current_time;
1016
    unsigned long time_diff, ns_frac_diff;
1017

1018
    /* TODO: Avoid code duplication from `GC_try_to_collect_inner`. */
1019
    GET_TIME(current_time);
8,608✔
1020
    time_diff = MS_TIME_DIFF(current_time, start_time);
8,608✔
1021
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
8,608✔
1022
    if (GC_measure_performance) {
8,608✔
1023
      GC_stopped_mark_total_time += time_diff; /*< may wrap */
8,608✔
1024
      GC_stopped_mark_total_ns_frac += (unsigned32)ns_frac_diff;
8,608✔
1025
      if (GC_stopped_mark_total_ns_frac >= (unsigned32)1000000UL) {
8,608✔
1026
        GC_stopped_mark_total_ns_frac -= (unsigned32)1000000UL;
3,830✔
1027
        GC_stopped_mark_total_time++;
3,830✔
1028
      }
1029
    }
1030

1031
    if (GC_PRINT_STATS_FLAG || GC_measure_performance) {
8,608✔
1032
      unsigned total_time = GC_world_stopped_total_time;
8,608✔
1033
      unsigned divisor = GC_world_stopped_total_divisor;
8,608✔
1034

1035
      /* Compute new world-stop delay total time. */
1036
      if (total_time > (((unsigned)-1) >> 1)
8,608✔
1037
          || divisor >= MAX_TOTAL_TIME_DIVISOR) {
8,608✔
1038
        /* Halve values if overflow occurs. */
1039
        total_time >>= 1;
17✔
1040
        divisor >>= 1;
17✔
1041
      }
1042
      total_time += time_diff < (((unsigned)-1) >> 1) ? (unsigned)time_diff
8,608✔
1043
                                                      : ((unsigned)-1) >> 1;
8,608✔
1044
      /* Update old `GC_world_stopped_total_time` and its divisor. */
1045
      GC_world_stopped_total_time = total_time;
8,608✔
1046
      GC_world_stopped_total_divisor = ++divisor;
8,608✔
1047
      if (GC_PRINT_STATS_FLAG && 0 == abandoned_at) {
8,608✔
1048
        GC_ASSERT(divisor != 0);
×
1049
        GC_log_printf(
×
1050
            "World-stopped marking took %lu ms %lu ns (%u ms in average)\n",
1051
            time_diff, ns_frac_diff, total_time / divisor);
1052
      }
1053
    }
1054
  }
1055
#endif
1056

1057
  EXIT_GC();
25,469✔
1058
  if (0 == abandoned_at)
25,469✔
1059
    return TRUE;
25,469✔
1060
  GC_COND_LOG_PRINTF("Abandoned stopped marking after %u iterations\n",
×
1061
                     abandoned_at - 1);
1062
  return FALSE;
×
1063
}
1064

1065
GC_INNER void
1066
GC_set_fl_marks(ptr_t q)
1,096,868✔
1067
{
1068
#ifdef GC_ASSERTIONS
1069
  ptr_t q2;
1070
#endif
1071
  struct hblk *h = HBLKPTR(q);
1,096,868✔
1072
  const struct hblk *last_h = h;
1,096,868✔
1073
  hdr *hhdr;
1074
#ifdef MARK_BIT_PER_OBJ
1075
  size_t sz;
1076
#endif
1077

1078
  GC_ASSERT(q != NULL);
1,096,868✔
1079
  hhdr = HDR(h);
1,096,868✔
1080
#ifdef MARK_BIT_PER_OBJ
1081
  sz = hhdr->hb_sz;
1082
#endif
1083
#ifdef GC_ASSERTIONS
1084
  q2 = (ptr_t)obj_link(q);
1,096,868✔
1085
#endif
1086
  for (;;) {
28,951,861✔
1087
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
30,048,729✔
1088

1089
    if (!mark_bit_from_hdr(hhdr, bit_no)) {
30,048,729✔
1090
      set_mark_bit_from_hdr(hhdr, bit_no);
15,883,150✔
1091
      INCR_MARKS(hhdr);
15,883,150✔
1092
    }
1093
    q = (ptr_t)obj_link(q);
30,048,729✔
1094
    if (NULL == q)
30,048,729✔
1095
      break;
1,096,868✔
1096
#ifdef GC_ASSERTIONS
1097
    /*
1098
     * Detect a cycle in the free list.  The algorithm uses a second
1099
     * iterator that advances twice as fast; it catches up with the first
1100
     * one only if the list contains a cycle.
1101
     */
1102
    if (q2 != NULL) {
28,951,861✔
1103
      q2 = (ptr_t)obj_link(q2);
14,734,327✔
1104
      GC_ASSERT(q2 != q);
14,734,327✔
1105
      if (q2 != NULL) {
14,734,327✔
1106
        q2 = (ptr_t)obj_link(q2);
14,217,534✔
1107
        GC_ASSERT(q2 != q);
14,217,534✔
1108
      }
1109
    }
1110
#endif
1111

1112
    h = HBLKPTR(q);
28,951,861✔
1113
    if (UNLIKELY(h != last_h)) {
28,951,861✔
1114
      last_h = h;
152,961✔
1115
      /* Update `hhdr` and `sz`. */
1116
      hhdr = HDR(h);
152,961✔
1117
#ifdef MARK_BIT_PER_OBJ
1118
      sz = hhdr->hb_sz;
1119
#endif
1120
    }
1121
  }
1122
}
1,096,868✔
1123

1124
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1125
/*
1126
 * Check that all mark bits for the free list, whose first entry is
1127
 * `*pfreelist`, are set.  The check is skipped if `*pfreelist` points to
1128
 * a special value.
1129
 */
1130
void
1131
GC_check_fl_marks(void **pfreelist)
15,431,424✔
1132
{
1133
  /*
1134
   * TODO: There is a data race with `GC_FAST_MALLOC_GRANS` (which does
1135
   * not do atomic updates to the free-list).  The race seems to be
1136
   * harmless, and for now we just skip this check in case of TSan.
1137
   */
1138
#  if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
1139
  ptr_t list = GC_cptr_load_acquire_read((volatile ptr_t *)pfreelist);
15,431,424✔
1140
  /* Atomic operations are used because the world is running. */
1141
  ptr_t p, prev, next;
1142

1143
  if (ADDR(list) <= HBLKSIZE)
15,431,424✔
1144
    return;
14,335,142✔
1145

1146
  prev = (ptr_t)pfreelist;
1,096,282✔
1147
  for (p = list; p != NULL; p = next) {
31,120,991✔
1148
    if (!GC_is_marked(p)) {
30,024,709✔
1149
      ABORT_ARG2("Unmarked local free-list entry", ": object %p on list %p",
×
1150
                 (void *)p, (void *)list);
1151
    }
1152

1153
    /*
1154
     * While traversing the free-list, it re-reads the pointer to the
1155
     * current node before accepting its next pointer and bails out
1156
     * if the latter has changed.  That way, it will not try to follow
1157
     * the pointer which might have been modified after the object was
1158
     * returned to the client.  It might perform the mark-check on the
1159
     * just allocated object but that should be harmless.
1160
     */
1161
    next = GC_cptr_load_acquire_read((volatile ptr_t *)p);
30,024,709✔
1162
    if (GC_cptr_load((volatile ptr_t *)prev) != p)
30,024,709✔
1163
      break;
×
1164
    prev = p;
30,024,709✔
1165
  }
1166
#  else
1167
  /* FIXME: Not implemented (just skipped). */
1168
  (void)pfreelist;
1169
#  endif
1170
}
1171
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1172

1173
/*
1174
 * Clear all mark bits for the free list (specified by the first entry).
1175
 * Decrement `GC_bytes_found` by number of bytes on free list.
1176
 */
1177
STATIC void
1178
GC_clear_fl_marks(ptr_t q)
55,381✔
1179
{
1180
  struct hblk *h = HBLKPTR(q);
55,381✔
1181
  const struct hblk *last_h = h;
55,381✔
1182
  hdr *hhdr = HDR(h);
55,381✔
1183
  size_t sz = hhdr->hb_sz; /*< normally set only once */
55,381✔
1184

1185
  for (;;) {
5,050,224✔
1186
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
5,105,605✔
1187

1188
    if (mark_bit_from_hdr(hhdr, bit_no)) {
5,105,605✔
1189
      size_t n_marks = hhdr->hb_n_marks;
768,532✔
1190

1191
#ifdef LINT2
1192
      if (0 == n_marks)
1193
        ABORT("hhdr->hb_n_marks cannot be zero");
1194
#else
1195
      GC_ASSERT(n_marks != 0);
768,532✔
1196
#endif
1197
      clear_mark_bit_from_hdr(hhdr, bit_no);
768,532✔
1198
      n_marks--;
768,532✔
1199
#ifdef PARALLEL_MARK
1200
      /* Approximate count, do not decrement to zero! */
1201
      if (n_marks != 0 || !GC_parallel) {
768,532✔
1202
        hhdr->hb_n_marks = n_marks;
767,724✔
1203
      }
1204
#else
1205
      hhdr->hb_n_marks = n_marks;
1206
#endif
1207
    }
1208
    GC_bytes_found -= (GC_signed_word)sz;
5,105,605✔
1209

1210
    q = (ptr_t)obj_link(q);
5,105,605✔
1211
    if (NULL == q)
5,105,605✔
1212
      break;
55,381✔
1213

1214
    h = HBLKPTR(q);
5,050,224✔
1215
    if (UNLIKELY(h != last_h)) {
5,050,224✔
1216
      last_h = h;
307,619✔
1217
      /* Update `hhdr` and `sz`. */
1218
      hhdr = HDR(h);
307,619✔
1219
      sz = hhdr->hb_sz;
307,619✔
1220
    }
1221
  }
1222
}
55,381✔
1223

1224
/* Mark all objects on the free lists for every object kind. */
1225
static void
1226
set_all_fl_marks(void)
66✔
1227
{
1228
  unsigned kind;
1229

1230
  for (kind = 0; kind < GC_n_kinds; kind++) {
330✔
1231
    word size; /*< current object size */
1232

1233
    for (size = 1; size <= MAXOBJGRANULES; size++) {
34,056✔
1234
      ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
33,792✔
1235

1236
      if (q != NULL)
33,792✔
1237
        GC_set_fl_marks(q);
564✔
1238
    }
1239
  }
1240
}
66✔
1241

1242
/*
1243
 * Clear free-list mark bits.  Also subtract memory remaining from
1244
 * `GC_bytes_found` count.
1245
 */
1246
static void
1247
clear_all_fl_marks(void)
25,469✔
1248
{
1249
  unsigned kind;
1250

1251
  for (kind = 0; kind < GC_n_kinds; kind++) {
147,560✔
1252
    word size; /*< current object size */
1253

1254
    for (size = 1; size <= MAXOBJGRANULES; size++) {
15,749,739✔
1255
      ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
15,627,648✔
1256

1257
      if (q != NULL)
15,627,648✔
1258
        GC_clear_fl_marks(q);
55,381✔
1259
    }
1260
  }
1261
}
25,469✔
1262

1263
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1264
void GC_check_tls(void);
1265
#endif
1266

1267
GC_on_heap_resize_proc GC_on_heap_resize = 0;
1268

1269
/* Used for logging only. */
1270
GC_INLINE int
1271
GC_compute_heap_usage_percent(void)
×
1272
{
1273
  word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
×
1274
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
×
1275
#if defined(CPPCHECK)
1276
  word limit = (GC_WORD_MAX >> 1) / 50; /*< to avoid a false positive */
1277
#else
1278
  const word limit = GC_WORD_MAX / 100;
×
1279
#endif
1280

1281
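  /*
   * Multiply first when `used * 100` cannot overflow; otherwise divide
   * `heap_sz` by 100 first.
   */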
  return used >= heap_sz ? 0
1282
         : used < limit  ? (int)((used * 100) / heap_sz)
×
1283
                         : (int)(used / (heap_sz / 100));
×
1284
}
1285

1286
#define GC_DBGLOG_PRINT_HEAP_IN_USE()                                        \
1287
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1288
                   GC_compute_heap_usage_percent(),                          \
1289
                   TO_KiB_UL(GC_composite_in_use),                           \
1290
                   TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1291

1292
/*
1293
 * Finish up a collection.  Assumes mark bits are consistent, but the
1294
 * world is otherwise running.
1295
 */
1296
STATIC void
1297
GC_finish_collection(void)
25,469✔
1298
{
1299
#ifndef NO_CLOCK
1300
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
25,469✔
1301
  CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
25,469✔
1302
#endif
1303

1304
  GC_ASSERT(I_HOLD_LOCK());
25,469✔
1305
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC) \
1306
    && !defined(DBG_HDRS_ALL)
1307
  /* Check that we marked some of our own data. */
1308
  GC_check_tls();
25,469✔
1309
  /* TODO: Add more checks. */
1310
#endif
1311

1312
#ifndef NO_CLOCK
1313
  if (GC_print_stats)
25,469✔
1314
    GET_TIME(start_time);
×
1315
#endif
1316
  if (GC_on_collection_event)
25,469✔
1317
    GC_on_collection_event(GC_EVENT_RECLAIM_START);
×
1318

1319
#ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1320
  if (GC_bytes_found > 0)
25,469✔
1321
    GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
25,045✔
1322
#endif
1323
  GC_bytes_found = 0;
25,469✔
1324
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1325
  if (GETENV("GC_PRINT_ADDRESS_MAP") != NULL) {
25,469✔
1326
    GC_print_address_map();
×
1327
  }
1328
#endif
1329
  COND_DUMP;
25,469✔
1330
  if (GC_find_leak_inner) {
25,469✔
1331
    set_all_fl_marks();
66✔
1332
    /* This just checks; it does not really reclaim anything. */
1333
    GC_start_reclaim(TRUE);
66✔
1334
  }
1335

1336
#ifndef GC_NO_FINALIZATION
1337
  GC_finalize();
25,469✔
1338
#endif
1339
#ifndef NO_CLOCK
1340
  if (GC_print_stats)
25,469✔
1341
    GET_TIME(finalize_time);
×
1342
#endif
1343
#ifdef MAKE_BACK_GRAPH
1344
  if (GC_print_back_height) {
1345
    GC_traverse_back_graph();
1346
  }
1347
#endif
1348

1349
  /*
1350
   * Clear free-list mark bits, in case they got accidentally marked
1351
   * (or `GC_find_leak` is set and they were intentionally marked).
1352
   * Note that composite objects on free list are cleared, thus
1353
   * accidentally marking a free list is not a problem; but some objects
1354
   * on the list itself might be marked, and the given function call
1355
   * fixes it.
1356
   */
1357
  clear_all_fl_marks();
25,469✔
1358

1359
  GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
25,469✔
1360
                        (long)GC_bytes_found);
×
1361

1362
  /* Reconstruct free lists to contain everything not marked. */
1363
  GC_start_reclaim(FALSE);
25,469✔
1364

1365
#ifdef USE_MUNMAP
1366
  if (GC_unmap_threshold > 0    /*< memory unmapping enabled? */
25,469✔
1367
      && LIKELY(GC_gc_no != 1)) /*< do not unmap during `GC_init` */
25,469✔
1368
    GC_unmap_old(GC_unmap_threshold);
25,435✔
1369

1370
  GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
25,469✔
1371
#endif
1372
  GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
25,469✔
1373
  GC_DBGLOG_PRINTF(
25,469✔
1374
      "GC #%lu freed %ld bytes, heap %lu KiB (" IF_USE_MUNMAP(
1375
          "+ %lu KiB unmapped ") "+ %lu KiB internal)\n",
1376
      (unsigned long)GC_gc_no, (long)GC_bytes_found,
×
1377
      TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
×
1378
      COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
×
1379
      TO_KiB_UL(GC_our_mem_bytes - GC_heapsize + sizeof(GC_arrays)));
×
1380
  GC_DBGLOG_PRINT_HEAP_IN_USE();
25,469✔
1381
  if (GC_is_full_gc) {
25,469✔
1382
    GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
11,968✔
1383
    GC_need_full_gc = FALSE;
11,968✔
1384
  } else {
1385
    GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
27,002✔
1386
                      > min_bytes_allocd() + GC_large_free_bytes;
13,501✔
1387
  }
1388

1389
  /* Reset or increment counters for next cycle. */
1390
  GC_n_attempts = 0;
25,469✔
1391
  GC_is_full_gc = FALSE;
25,469✔
1392
  GC_bytes_allocd_before_gc += GC_bytes_allocd;
25,469✔
1393
  GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
25,469✔
1394
  GC_bytes_allocd = 0;
25,469✔
1395
  GC_bytes_dropped = 0;
25,469✔
1396
  GC_bytes_freed = 0;
25,469✔
1397
  GC_finalizer_bytes_freed = 0;
25,469✔
1398

1399
  if (GC_on_collection_event)
25,469✔
1400
    GC_on_collection_event(GC_EVENT_RECLAIM_END);
×
1401
#ifndef NO_CLOCK
1402
  if (GC_print_stats) {
25,469✔
1403
    CLOCK_TYPE done_time;
1404

1405
    GET_TIME(done_time);
×
1406
#  if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1407
    /* A convenient place to output finalization statistics. */
1408
    GC_print_finalization_stats();
×
1409
#  endif
1410
    GC_log_printf(
×
1411
        "Finalize and initiate sweep took %lu ms %lu ns + %lu ms %lu ns\n",
1412
        MS_TIME_DIFF(finalize_time, start_time),
×
1413
        NS_FRAC_TIME_DIFF(finalize_time, start_time),
×
1414
        MS_TIME_DIFF(done_time, finalize_time),
×
1415
        NS_FRAC_TIME_DIFF(done_time, finalize_time));
×
1416
  }
1417
#elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1418
  if (GC_print_stats)
1419
    GC_print_finalization_stats();
1420
#endif
1421
}
25,469✔
1422

1423
/* Note: if `stop_func` is 0, then `GC_default_stop_func` is used instead. */
1424
STATIC GC_bool
1425
GC_try_to_collect_general(GC_stop_func stop_func, GC_bool force_unmap)
2,458✔
1426
{
1427
  GC_bool result;
1428
#ifdef USE_MUNMAP
1429
  unsigned old_unmap_threshold;
1430
#endif
1431
  IF_CANCEL(int cancel_state;)
1432

1433
  if (UNLIKELY(!GC_is_initialized))
2,458✔
1434
    GC_init();
×
1435
  if (GC_debugging_started)
2,458✔
1436
    GC_print_all_smashed();
148✔
1437
  GC_notify_or_invoke_finalizers();
2,458✔
1438
  LOCK();
2,458✔
1439
  if (force_unmap) {
2,458✔
1440
    /*
1441
     * Record current heap size to make heap growth more conservative
1442
     * afterwards (as if the heap is growing from zero size again).
1443
     */
1444
    GC_heapsize_at_forced_unmap = GC_heapsize;
70✔
1445
  }
1446
  DISABLE_CANCEL(cancel_state);
2,458✔
1447
#ifdef USE_MUNMAP
1448
  old_unmap_threshold = GC_unmap_threshold;
2,458✔
1449
  if (force_unmap || (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
2,458✔
1450
    GC_unmap_threshold = 1; /*< unmap as much as possible */
70✔
1451
#endif
1452
  /* Minimize junk left in my registers. */
1453
  GC_noop6(0, 0, 0, 0, 0, 0);
2,458✔
1454
  result = GC_try_to_collect_inner(stop_func != 0 ? stop_func
2,458✔
1455
                                                  : GC_default_stop_func);
1456
#ifdef USE_MUNMAP
1457
  /* Restore it. */
1458
  GC_unmap_threshold = old_unmap_threshold;
2,458✔
1459
#endif
1460
  RESTORE_CANCEL(cancel_state);
2,458✔
1461
  UNLOCK();
2,458✔
1462
  if (result) {
2,458✔
1463
    if (GC_debugging_started)
2,416✔
1464
      GC_print_all_smashed();
148✔
1465
    GC_notify_or_invoke_finalizers();
2,416✔
1466
  }
1467
  return result;
2,458✔
1468
}
1469

1470
/* Externally callable routines to invoke full, stop-the-world collection. */
1471

1472
GC_API int GC_CALL
1473
GC_try_to_collect(GC_stop_func stop_func)
×
1474
{
1475
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
×
1476
  return (int)GC_try_to_collect_general(stop_func, FALSE);
×
1477
}
1478

1479
GC_API void GC_CALL
1480
GC_gcollect(void)
2,388✔
1481
{
1482
  /*
1483
   * Zero is passed as stop_func to get `GC_default_stop_func` value
1484
   * while holding the allocator lock (to prevent data race).
1485
   */
1486
  (void)GC_try_to_collect_general(0, FALSE);
2,388✔
1487
  if (get_have_errors())
2,388✔
1488
    GC_print_all_errors();
56✔
1489
}
2,388✔
1490

1491
GC_API void GC_CALL
1492
GC_gcollect_and_unmap(void)
70✔
1493
{
1494
  /* Collect and force memory unmapping to OS. */
1495
  (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
70✔
1496
}
70✔
1497

1498
STATIC GC_on_os_get_mem_proc GC_on_os_get_mem = 0;
1499

1500
GC_API void GC_CALL
1501
GC_set_on_os_get_mem(GC_on_os_get_mem_proc fn)
2✔
1502
{
1503
  /* `fn` may be 0 (means no event notifier). */
1504
  LOCK();
2✔
1505
  GC_on_os_get_mem = fn;
2✔
1506
  UNLOCK();
2✔
1507
}
2✔
1508

1509
GC_API GC_on_os_get_mem_proc GC_CALL
1510
GC_get_on_os_get_mem(void)
2✔
1511
{
1512
  GC_on_os_get_mem_proc fn;
1513

1514
  READER_LOCK();
2✔
1515
  fn = GC_on_os_get_mem;
2✔
1516
  READER_UNLOCK();
2✔
1517
  return fn;
2✔
1518
}
1519

1520
GC_INNER ptr_t
1521
GC_os_get_mem(size_t bytes)
2,187✔
1522
{
1523
  ptr_t space;
1524

1525
  GC_ASSERT(I_HOLD_LOCK());
2,187✔
1526
  space = (ptr_t)GET_MEM(bytes); /*< `HBLKSIZE`-aligned */
2,187✔
1527
  if (GC_on_os_get_mem)
2,187✔
1528
    (*GC_on_os_get_mem)(space, bytes);
×
1529
  if (UNLIKELY(NULL == space))
2,187✔
1530
    return NULL;
×
1531
#ifdef USE_PROC_FOR_LIBRARIES
1532
  /* Add `HBLKSIZE`-aligned `GET_MEM`-generated block to `GC_our_memory`. */
1533
  if (GC_n_memory >= MAX_HEAP_SECTS)
1534
    ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1535
  GC_our_memory[GC_n_memory].hs_start = space;
1536
  GC_our_memory[GC_n_memory].hs_bytes = bytes;
1537
  GC_n_memory++;
1538
#endif
1539
  GC_our_mem_bytes += bytes;
2,187✔
1540
  GC_VERBOSE_LOG_PRINTF("Got %lu bytes from OS\n", (unsigned long)bytes);
2,187✔
1541
  return space;
2,187✔
1542
}
1543

1544
/*
1545
 * Use the chunk of memory starting at `h` of size `sz` as part of the heap.
1546
 * Assumes `h` is `HBLKSIZE`-aligned, `sz` is a multiple of `HBLKSIZE`.
1547
 */
1548
STATIC void
1549
GC_add_to_heap(struct hblk *h, size_t sz)
751✔
1550
{
1551
  hdr *hhdr;
1552
  ptr_t endp;
1553
  size_t old_capacity = 0;
751✔
1554
  void *old_heap_sects = NULL;
751✔
1555
#ifdef GC_ASSERTIONS
1556
  size_t i;
1557
#endif
1558

1559
  GC_ASSERT(I_HOLD_LOCK());
751✔
1560
  GC_ASSERT(ADDR(h) % HBLKSIZE == 0);
751✔
1561
  GC_ASSERT(sz % HBLKSIZE == 0);
751✔
1562
  GC_ASSERT(sz > 0);
751✔
1563
  GC_ASSERT(GC_all_nils != NULL);
751✔
1564

1565
  if (UNLIKELY(GC_n_heap_sects == GC_capacity_heap_sects)) {
751✔
1566
    /* Allocate new `GC_heap_sects` with sufficient capacity. */
1567
#ifndef INITIAL_HEAP_SECTS
1568
#  define INITIAL_HEAP_SECTS 32
1569
#endif
1570
    size_t new_capacity
41✔
1571
        = GC_n_heap_sects > 0 ? GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
41✔
1572
    void *new_heap_sects
1573
        = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
41✔
1574

1575
    if (NULL == new_heap_sects) {
41✔
1576
      /* Retry with smaller yet sufficient capacity. */
1577
      new_capacity = GC_n_heap_sects + INITIAL_HEAP_SECTS;
×
1578
      new_heap_sects
1579
          = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
×
1580
      if (NULL == new_heap_sects)
×
1581
        ABORT("Insufficient memory for heap sections");
×
1582
    }
1583
    old_capacity = GC_capacity_heap_sects;
41✔
1584
    old_heap_sects = GC_heap_sects;
41✔
1585
    /* Transfer `GC_heap_sects` contents to the newly allocated array. */
1586
    if (GC_n_heap_sects > 0)
41✔
1587
      BCOPY(old_heap_sects, new_heap_sects,
7✔
1588
            GC_n_heap_sects * sizeof(struct HeapSect));
1589
    GC_capacity_heap_sects = new_capacity;
41✔
1590
    GC_heap_sects = (struct HeapSect *)new_heap_sects;
41✔
1591
    GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
41✔
1592
                       (unsigned long)new_capacity);
1593
  }
1594

1595
  while (UNLIKELY(ADDR(h) <= HBLKSIZE)) {
751✔
1596
    /* Cannot handle memory near address zero. */
1597
    ++h;
×
1598
    sz -= HBLKSIZE;
×
1599
    if (0 == sz)
×
1600
      return;
×
1601
  }
1602
  while (UNLIKELY(ADDR(h) >= GC_WORD_MAX - sz)) {
751✔
1603
    /* Prevent overflow when calculating `endp`. */
1604
    sz -= HBLKSIZE;
×
1605
    if (0 == sz)
×
1606
      return;
×
1607
  }
1608
  endp = (ptr_t)h + sz;
751✔
1609

1610
  hhdr = GC_install_header(h);
751✔
1611
  if (UNLIKELY(NULL == hhdr)) {
751✔
1612
    /*
1613
     * This is extremely unlikely; we cannot add the block.  This will almost
1614
     * certainly result in a `NULL` returned from the allocator, which
1615
     * is entirely appropriate.
1616
     */
1617
    return;
×
1618
  }
1619
#ifdef GC_ASSERTIONS
1620
  /* Ensure no intersection between sections. */
1621
  for (i = 0; i < GC_n_heap_sects; i++) {
26,430✔
1622
    ptr_t hs_start = GC_heap_sects[i].hs_start;
25,679✔
1623
    ptr_t hs_end = hs_start + GC_heap_sects[i].hs_bytes;
25,679✔
1624

1625
    GC_ASSERT(!(ADDR_INSIDE((ptr_t)h, hs_start, hs_end)
25,679✔
1626
                || (ADDR_LT(hs_start, endp) && ADDR_GE(hs_end, endp))
1627
                || (ADDR_LT((ptr_t)h, hs_start) && ADDR_LT(hs_end, endp))));
1628
  }
1629
#endif
1630
  GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)h;
751✔
1631
  GC_heap_sects[GC_n_heap_sects].hs_bytes = sz;
751✔
1632
  GC_n_heap_sects++;
751✔
1633
  hhdr->hb_block = h;
751✔
1634
  hhdr->hb_sz = sz;
751✔
1635
  hhdr->hb_flags = 0;
751✔
1636
  GC_freehblk(h);
751✔
1637
  GC_heapsize += sz;
751✔
1638

1639
  if (ADDR_GE((ptr_t)GC_least_plausible_heap_addr, (ptr_t)h)
751✔
1640
      || UNLIKELY(NULL == GC_least_plausible_heap_addr)) {
713✔
1641
    /*
1642
     * Making it a little smaller than necessary prevents us from
1643
     * getting a false hit from the variable itself.  There is some
1644
     * unintentional reflection here.
1645
     */
1646
    GC_least_plausible_heap_addr = (ptr_t)h - sizeof(ptr_t);
38✔
1647
  }
1648
  if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, endp)) {
751✔
1649
    GC_greatest_plausible_heap_addr = endp;
×
1650
  }
1651
#ifdef SET_REAL_HEAP_BOUNDS
1652
  if (ADDR(h) < GC_least_real_heap_addr
751✔
1653
      || UNLIKELY(0 == GC_least_real_heap_addr))
205✔
1654
    GC_least_real_heap_addr = ADDR(h) - sizeof(ptr_t);
580✔
1655
  if (GC_greatest_real_heap_addr < ADDR(endp)) {
751✔
1656
#  ifdef INCLUDE_LINUX_THREAD_DESCR
1657
    /* Avoid heap intersection with the static data roots. */
1658
    GC_exclude_static_roots_inner((ptr_t)h, endp);
1659
#  endif
1660
    GC_greatest_real_heap_addr = ADDR(endp);
53✔
1661
  }
1662
#endif
1663
  GC_handle_protected_regions_limit();
751✔
1664
  if (UNLIKELY(old_capacity > 0)) {
751✔
1665
#ifndef GWW_VDB
1666
    /*
1667
     * Recycling may call `GC_add_to_heap()` again but should not cause
1668
     * resizing of `GC_heap_sects`.
1669
     */
1670
    GC_scratch_recycle_no_gww(old_heap_sects,
7✔
1671
                              old_capacity * sizeof(struct HeapSect));
1672
#else
1673
    /* TODO: Implement GWW-aware recycling as in `alloc_mark_stack`. */
1674
    GC_noop1_ptr(old_heap_sects);
1675
#endif
1676
  }
1677
}
1678

1679
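/*
 * For illustration, a minimal sketch of the bookkeeping performed by
 * `GC_add_to_heap()`; the address and size below are hypothetical, and the
 * call shown is the kind made internally, e.g. from `GC_expand_hp_inner()`
 * below.
 *
 * ```
 *   GC_add_to_heap((struct hblk *)0x10000, 0x10000); // add a 64 KiB chunk
 *   // Afterwards (assuming the header installation succeeded):
 *   //   - GC_heap_sects[GC_n_heap_sects - 1] records start 0x10000 and
 *   //     length 0x10000, and GC_heapsize has grown by 0x10000;
 *   //   - the chunk has been placed on the free-block list by GC_freehblk();
 *   //   - ADDR(GC_least_plausible_heap_addr) < 0x10000 and
 *   //     ADDR(GC_greatest_plausible_heap_addr) >= 0x20000.
 * ```
 */
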
#ifndef NO_DEBUGGING
void
GC_print_heap_sects(void)
{
  size_t i;

  GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
            (unsigned long)GC_heapsize /*, */
                COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

  for (i = 0; i < GC_n_heap_sects; i++) {
    ptr_t start = GC_heap_sects[i].hs_start;
    size_t len = GC_heap_sects[i].hs_bytes;
    unsigned nbl = 0;
#  ifndef NO_BLACK_LISTING
    struct hblk *h;

    for (h = (struct hblk *)start; ADDR_LT((ptr_t)h, start + len); h++) {
      if (GC_is_black_listed(h, HBLKSIZE))
        nbl++;
    }
#  endif
    GC_printf("Section %u from %p to %p %u/%lu blacklisted\n", (unsigned)i,
              (void *)start, (void *)&start[len], nbl,
              (unsigned long)divHBLKSZ(len));
  }
}
#endif /* !NO_DEBUGGING */

void *GC_least_plausible_heap_addr = MAKE_CPTR(GC_WORD_MAX);
void *GC_greatest_plausible_heap_addr = NULL;

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL
GC_set_max_heap_size(GC_word n)
{
  GC_max_heapsize = n;
}
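
/*
 * A minimal usage sketch of the self-imposed limit above (the 64 MiB figure
 * is arbitrary): once growing the heap any further would exceed the cap,
 * `GC_expand_hp_inner()` below refuses the expansion.
 *
 * ```
 *   GC_set_max_heap_size(64 << 20); // cap the heap at 64 MiB
 *   GC_INIT();
 *   // ... GC_MALLOC() starts returning NULL once neither a collection
 *   // nor an expansion within the cap can satisfy a request.
 * ```
 */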

word GC_max_retries = 0;

GC_INNER void
GC_scratch_recycle_inner(void *ptr, size_t sz)
{
  size_t page_offset;
  size_t displ = 0;
  size_t recycled_bytes;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == ptr)
    return;

  GC_ASSERT(sz != 0);
  GC_ASSERT(GC_page_size != 0);
  /* TODO: Assert correct memory flags if `GWW_VDB`. */
  page_offset = ADDR(ptr) & (GC_page_size - 1);
  if (page_offset != 0)
    displ = GC_page_size - page_offset;
  recycled_bytes = sz > displ ? (sz - displ) & ~(GC_page_size - 1) : 0;
  GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
                     (unsigned long)recycled_bytes, (unsigned long)sz, ptr);
  if (recycled_bytes > 0)
    GC_add_to_heap((struct hblk *)((ptr_t)ptr + displ), recycled_bytes);
}

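/*
 * A worked example of the computation above, assuming a 4 KiB page size
 * and a hypothetical scratch region of 20000 bytes at address 0x5100:
 *
 * ```
 *   page_offset    = 0x5100 & 0xfff          == 0x100 (256)
 *   displ          = 4096 - 256              == 3840
 *   recycled_bytes = (20000 - 3840) & ~0xfff == 12288
 * ```
 *
 * Thus 12288 bytes starting at the page-aligned address 0x6000 are handed
 * to `GC_add_to_heap()`; the unaligned head and tail of the region are not
 * returned to the heap.
 */
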
GC_INNER GC_bool
GC_expand_hp_inner(word n)
{
  size_t sz;
  struct hblk *space;
  /* Number of bytes by which we expect the heap to expand soon. */
  word expansion_slop;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_page_size != 0);
  if (0 == n)
    n = 1;
  sz = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
  GC_DBGLOG_PRINT_HEAP_IN_USE();
  if (GC_max_heapsize != 0
      && (GC_max_heapsize < (word)sz
          || GC_heapsize > GC_max_heapsize - (word)sz)) {
    /* Exceeded the self-imposed limit. */
    return FALSE;
  }
  space = (struct hblk *)GC_os_get_mem(sz);
  if (UNLIKELY(NULL == space)) {
    WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", sz >> 10);
    return FALSE;
  }
  GC_last_heap_growth_gc_no = GC_gc_no;
  GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                    TO_KiB_UL(GC_heapsize + sz),
                    (unsigned long)GC_bytes_allocd);

  /*
   * Adjust heap limits generously for black-listing to work better.
   * `GC_add_to_heap()` performs minimal adjustment needed for correctness.
   */
  expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
  if ((0 == GC_last_heap_addr && (ADDR(space) & SIGNB) == 0)
      || (GC_last_heap_addr != 0 && GC_last_heap_addr < ADDR(space))) {
    /* Assume the heap is growing up. */
    if (LIKELY(ADDR(space) < GC_WORD_MAX - (sz + expansion_slop))) {
      ptr_t new_limit = (ptr_t)space + sz + expansion_slop;

      if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, new_limit))
        GC_greatest_plausible_heap_addr = new_limit;
    }
  } else {
    /* Heap is growing down. */
    if (LIKELY(ADDR(space) > expansion_slop + sizeof(ptr_t))) {
      ptr_t new_limit = (ptr_t)space - expansion_slop - sizeof(ptr_t);

      if (ADDR_LT(new_limit, (ptr_t)GC_least_plausible_heap_addr))
        GC_least_plausible_heap_addr = new_limit;
    }
  }
  GC_last_heap_addr = ADDR(space);

  GC_add_to_heap(space, sz);
  if (GC_on_heap_resize)
    (*GC_on_heap_resize)(GC_heapsize);

  return TRUE;
}

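/*
 * A numeric illustration of the slop adjustment above (all values are
 * hypothetical, a 64-bit target is assumed): suppose the new mapping is
 * placed at 0x7f0000000000, the previous one was at a higher address (so
 * the heap is assumed to be growing down), and `expansion_slop` works out
 * to 4 MiB.  Then
 *
 * ```
 *   new_limit = (ptr_t)0x7f0000000000 - 0x400000 - 8
 *             == (ptr_t)0x7effffbffff8
 * ```
 *
 * and `GC_least_plausible_heap_addr` is lowered to that value unless it is
 * already below it, leaving room for further downward growth before the
 * bounds have to be widened again.
 */
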
GC_API int GC_CALL
GC_expand_hp(size_t bytes)
{
  size_t n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
  word old_heapsize;
#endif
  GC_bool result;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  LOCK();
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
  old_heapsize = GC_heapsize;
#endif
  result = GC_expand_hp_inner(n_blocks);
  if (result) {
    GC_requested_heapsize += bytes;
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
    if (GC_dont_gc) {
      /* Do not call `WARN()` if the heap growth is intentional. */
      GC_ASSERT(GC_heapsize >= old_heapsize);
      GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
    }
#endif
  }
  UNLOCK();
  /*
   * Really returns a `GC_bool` value, but the function is externally
   * visible, so that is clumsy.
   */
  return (int)result;
}
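
/*
 * A minimal usage sketch (the 16 MiB figure is arbitrary): pre-expanding
 * the heap right after collector initialization so that a known allocation
 * burst does not go through several small, collection-triggering
 * expansions.  `GC_expand_hp()` returns zero if the expansion was refused
 * (e.g. because of `GC_set_max_heap_size()`) or the OS is out of memory.
 *
 * ```
 *   GC_INIT();
 *   if (!GC_expand_hp(16 * 1024 * 1024)) {
 *     // The expansion was refused or failed; allocation will still work,
 *     // but the heap will grow incrementally instead.
 *   }
 * ```
 */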

/*
 * The minimum value of the ratio of allocated bytes since the latest
 * collection to the number of finalizers created since that collection
 * which triggers a collection instead of heap expansion.  Has no effect
 * in the incremental mode.
 */
#if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
#else
STATIC word GC_allocd_bytes_per_finalizer = 10000;
#endif

GC_API void GC_CALL
GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}

GC_API GC_word GC_CALL
GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}
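
/*
 * A worked example with the default value of 10000: if 50 finalizable
 * objects have been registered since the latest collection, then
 * `GC_collect_or_expand()` below prefers a full collection over growing
 * the heap (in the non-incremental mode, and provided some finalization
 * has occurred at all) as long as fewer than 50 * 10000 == 500000 bytes
 * have been allocated in the meantime.  Setting the value to zero disables
 * this particular trigger.  Hypothetical client code tuning it:
 *
 * ```
 *   // Prefer collection over expansion for up to 100000 allocated bytes
 *   // per newly registered finalizer.
 *   GC_set_allocd_bytes_per_finalizer(100000);
 * ```
 */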

GC_INNER GC_bool
GC_collect_or_expand(word needed_blocks, unsigned flags, GC_bool retry)
{
  static word last_fo_entries, last_bytes_finalized;

  GC_bool gc_not_stopped = TRUE;
  word blocks_to_get;
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  DISABLE_CANCEL(cancel_state);
  if (!GC_incremental && !GC_dont_gc
      && ((GC_dont_expand && GC_bytes_allocd > 0)
          || (GC_fo_entries > last_fo_entries
              && (last_bytes_finalized | GC_bytes_finalized) != 0
              && (GC_fo_entries - last_fo_entries)
                         * GC_allocd_bytes_per_finalizer
                     > GC_bytes_allocd)
          || GC_should_collect())) {
    /*
     * Try to do a full collection using "default" `stop_func` (unless
     * nothing has been allocated since the latest collection or heap
     * expansion is disabled).
     */
    gc_not_stopped = GC_try_to_collect_inner(
        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry)
            ? GC_default_stop_func
            : GC_never_stop_func);
    if (gc_not_stopped || !retry) {
      /*
       * Either the collection has not been aborted or this is the
       * first attempt (in a loop).
       */
      last_fo_entries = GC_fo_entries;
      last_bytes_finalized = GC_bytes_finalized;
      RESTORE_CANCEL(cancel_state);
      return TRUE;
    }
  }

  blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                      / (HBLKSIZE * GC_free_space_divisor)
                  + needed_blocks;
  if (blocks_to_get > MAXHINCR) {
#ifdef NO_BLACK_LISTING
    UNUSED_ARG(flags);
    blocks_to_get = needed_blocks > MAXHINCR ? needed_blocks : MAXHINCR;
#else
    word slop;

    /*
     * Get the minimum required to make it likely that we can satisfy
     * the current request in the presence of black-listing.  This will
     * probably be bigger than `MAXHINCR`.
     */
    if ((flags & IGNORE_OFF_PAGE) != 0) {
      slop = 4;
    } else {
      slop = 2 * divHBLKSZ(BL_LIMIT);
      if (slop > needed_blocks)
        slop = needed_blocks;
    }
    if (needed_blocks + slop > MAXHINCR) {
      blocks_to_get = needed_blocks + slop;
    } else {
      blocks_to_get = MAXHINCR;
    }
#endif
    if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
      blocks_to_get = divHBLKSZ(GC_WORD_MAX);
  } else if (blocks_to_get < MINHINCR) {
    blocks_to_get = MINHINCR;
  }

  if (GC_max_heapsize > GC_heapsize) {
    word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
    if (blocks_to_get > max_get_blocks)
      blocks_to_get
          = max_get_blocks > needed_blocks ? max_get_blocks : needed_blocks;
  }

#ifdef USE_MUNMAP
  if (GC_unmap_threshold > 1) {
    /*
     * Return as much memory to the OS as possible before trying to
     * get memory from it.
     */
    GC_unmap_old(0);
  }
#endif
  if (!GC_expand_hp_inner(blocks_to_get)
      && (blocks_to_get == needed_blocks
          || !GC_expand_hp_inner(needed_blocks))) {
    if (!gc_not_stopped) {
      /* Do not increment `GC_alloc_fail_count` here (and no warning). */
      GC_gcollect_inner();
      GC_ASSERT(0 == GC_bytes_allocd);
    } else if (GC_alloc_fail_count++ < GC_max_retries) {
      WARN("Out of Memory!  Trying to continue...\n", 0);
      GC_gcollect_inner();
    } else {
#ifdef USE_MUNMAP
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#endif
#if !defined(SMALL_CONFIG) && (CPP_WORDSZ >= 32)
#  define MAX_HEAPSIZE_WARNED_IN_BYTES (5 << 20) /*< 5 MB */

      if (GC_heapsize > (word)MAX_HEAPSIZE_WARNED_IN_BYTES) {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
             " Returning NULL!\n",
             (GC_heapsize - GC_unmapped_bytes) >> 20);
      } else
#endif
      /* else */ {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " bytes."
             " Returning NULL!\n",
             GC_heapsize - GC_unmapped_bytes);
      }
      RESTORE_CANCEL(cancel_state);
      return FALSE;
    }
  } else if (GC_alloc_fail_count > 0) {
    GC_COND_LOG_PRINTF("Memory available again...\n");
  }
  RESTORE_CANCEL(cancel_state);
  return TRUE;
}

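/*
 * A worked example of the `blocks_to_get` computation above, assuming a
 * 4 KiB `HBLKSIZE` and the default `GC_free_space_divisor` of 3 (the other
 * figures are hypothetical): if the heap has grown by 12 MiB since the
 * latest forced unmapping and 2 more blocks are needed, then the raw value
 * is
 *
 * ```
 *   blocks_to_get = 12 MiB / (4 KiB * 3) + 2 == 1024 + 2 == 1026
 * ```
 *
 * i.e. roughly 4 MiB worth of blocks.  It is then clamped (to at least
 * `MINHINCR`, to a black-listing-aware maximum, and to whatever still fits
 * under `GC_max_heapsize`) before `GC_expand_hp_inner()` is called.
 */
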
GC_INNER ptr_t
GC_allocobj(size_t lg, int kind)
{
#define MAX_ALLOCOBJ_RETRIES 3
  int retry_cnt = 0;
  void **flh = &GC_obj_kinds[kind].ok_freelist[lg];
#ifndef GC_DISABLE_INCREMENTAL
  GC_bool tried_minor = FALSE;
#endif

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(0 == lg))
    return NULL;

  while (NULL == *flh) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCOBJ_RETRIES)
      ABORT("Too many retries in GC_allocobj");
#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED && !GC_dont_gc) {
      /*
       * True incremental mode, not just generational.
       * Do our share of marking work.
       */
      GC_collect_a_little_inner(1);
    }
#endif
    /* Sweep blocks for objects of this size. */
    GC_ASSERT(!GC_is_full_gc || NULL == GC_obj_kinds[kind].ok_reclaim_list
              || NULL == GC_obj_kinds[kind].ok_reclaim_list[lg]);
    GC_continue_reclaim(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

    GC_new_hblk(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED && !tried_minor
        && !GC_dont_gc) {
      GC_collect_a_little_inner(1);
      tried_minor = TRUE;
      continue;
    }
#endif
    if (UNLIKELY(!GC_collect_or_expand(1, 0 /* `flags` */, retry_cnt > 0)))
      return NULL;
    retry_cnt++;
  }
  /* Successful allocation; reset failure count. */
  GC_alloc_fail_count = 0;
  return (ptr_t)(*flh);
}