
bdwgc / bdwgc, build 1943 (push, travis-ci, ivmai)

27 Nov 2025 11:43AM UTC coverage: 73.064% (-0.5%) from 73.519%

Remove outdated README.uts file
(documentation)

* CMakeLists.txt [enable_docs] (FILES): Do not install `README.uts`.
* Makefile.am (dist_docdocsplatforms_DATA): Remove `README.uts`.
* docs/platforms/README.uts: Remove file.

6464 of 8847 relevant lines covered (73.06%)

13704245.84 hits per line

Source File

/alloc.c (80.2% coverage)
1
/*
2
 * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
6
 * Copyright (c) 2008-2025 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 */
17

18
#include "private/gc_priv.h"
19

20
/*
21
 * Separate free lists are maintained for different sized objects up
22
 * to `MAXOBJBYTES`.
23
 * The call `GC_allocobj(lg, kind)` ensures that the free list for the given
24
 * kind of objects of the given size in granules is a nonempty one.
25
 * It returns a pointer to the first entry on the free list.
26
 * In a single-threaded world, `GC_allocobj` may be called to allocate
27
 * an object of small size `lb` (and `NORMAL` kind) as follows
28
 * (`GC_generic_malloc_inner` is a wrapper over `GC_allocobj` which also
29
 * fills in `GC_size_map` if needed):
30
 *
31
 * ```
32
 *   lg = GC_size_map[lb];
33
 *   op = GC_objfreelist[lg];
34
 *   if (NULL == op) {
35
 *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
36
 *   } else {
37
 *     GC_objfreelist[lg] = obj_link(op);
38
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
39
 *   }
40
 * ```
41
 *
42
 * Note that this is very fast if the free list is not empty; it should
43
 * only involve the execution of 4 or 5 simple instructions.
44
 * All composite objects on freelists are cleared, except for
45
 * their first "pointer-sized" word.
46
 */
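
As a client-side illustration of the fast path sketched above, consider the hypothetical program below (not part of alloc.c, assuming only the public gc.h API): once the free list for a small size is populated, repeated GC_MALLOC calls of that size are served from it without entering the allocator proper.

#include <assert.h>
#include <gc.h>

int main(void)
{
  int i;

  GC_INIT();
  /* The first 24-byte request typically refills the corresponding */
  /* free list; later requests take the short free-list path.      */
  for (i = 0; i < 1000; i++) {
    void *p = GC_MALLOC(24);

    assert(p != NULL);
  }
  return 0;
}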
47

48
/*
49
 * The allocator uses `GC_allochblk` to allocate large chunks of objects.
50
 * These chunks all start on addresses that are multiples of `HBLKSIZE`.
51
 * Each allocated chunk has an associated header, which can be located
52
 * quickly based on the address of the chunk.  This makes it possible
53
 * to check quickly whether an arbitrary address corresponds to an object
54
 * administered by the allocator.  (See `headers.c` file for details.)
55
 */
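
The address-to-header mapping itself is internal, but its visible effect can be illustrated with the public GC_is_heap_ptr predicate from gc.h; a minimal sketch (not part of this file):

#include <stdio.h>
#include <gc.h>

int main(void)
{
  int on_stack;
  void *obj;

  GC_INIT();
  obj = GC_MALLOC(64);
  /* Resolved quickly via the per-chunk headers described above. */
  printf("GC heap pointer? %d\n", GC_is_heap_ptr(obj));       /* prints 1 */
  printf("GC heap pointer? %d\n", GC_is_heap_ptr(&on_stack)); /* prints 0 */
  return 0;
}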
56

57
/* Number of bytes not intended to be collected. */
58
word GC_non_gc_bytes = 0;
59

60
word GC_gc_no = 0;
61

62
#ifndef NO_CLOCK
63

64
GC_API void GC_CALL
65
GC_start_performance_measurement(void)
2✔
66
{
67
  GC_measure_performance = TRUE;
2✔
68
}
2✔
69

70
GC_API unsigned long GC_CALL
71
GC_get_full_gc_total_time(void)
2✔
72
{
73
  return GC_full_gc_total_time;
2✔
74
}
75

76
GC_API unsigned long GC_CALL
77
GC_get_stopped_mark_total_time(void)
2✔
78
{
79
  return GC_stopped_mark_total_time;
2✔
80
}
81

82
#  ifndef MAX_TOTAL_TIME_DIVISOR
83
/*
84
 * We shall not use big values here (so "outdated" delay time values would
85
 * have less impact on "average" delay time value than newer ones).
86
 */
87
#    define MAX_TOTAL_TIME_DIVISOR 1000
88
#  endif
89

90
GC_API unsigned long GC_CALL
91
GC_get_avg_stopped_mark_time_ns(void)
2✔
92
{
93
  unsigned long total_time;
94
  unsigned divisor;
95

96
  READER_LOCK();
2✔
97
  total_time = (unsigned long)GC_world_stopped_total_time;
2✔
98
  divisor = GC_world_stopped_total_divisor;
2✔
99
  READER_UNLOCK();
2✔
100
  if (0 == divisor) {
2✔
101
    GC_ASSERT(0 == total_time);
×
102
    /*
103
     * No world-stopped collection has occurred since the start of
104
     * performance measurements.
105
     */
106
    return 0;
×
107
  }
108

109
  /* Halve values to prevent overflow during the multiplication. */
110
  for (; total_time > ~0UL / (1000UL * 1000); total_time >>= 1) {
2✔
111
    divisor >>= 1;
×
112
    if (UNLIKELY(0 == divisor)) {
×
113
      /* The actual result is larger than representable value. */
114
      return ~0UL;
×
115
    }
116
  }
117

118
  return total_time * (1000UL * 1000) / divisor;
2✔
119
}
120

121
#endif /* !NO_CLOCK */
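
A sketch of how a client might consume these counters (assumes a build without NO_CLOCK; the allocation workload is arbitrary):

#include <stdio.h>
#include <gc.h>

int main(void)
{
  int i;

  GC_INIT();
  GC_start_performance_measurement(); /* begin accumulating GC timings */
  for (i = 0; i < 100000; i++)
    (void)GC_MALLOC(128);
  GC_gcollect();
  printf("full GC total: %lu ms, stopped marking total: %lu ms\n",
         GC_get_full_gc_total_time(), GC_get_stopped_mark_total_time());
  return 0;
}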
122

123
GC_API int GC_CALL
124
GC_is_incremental_mode(void)
16✔
125
{
126
  return (int)GC_incremental;
16✔
127
}
128

129
#ifdef THREADS
130
int GC_parallel = FALSE; /*< parallel collection is off by default */
131
#endif
132

133
#if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
134
int GC_full_freq = GC_FULL_FREQ;
135
#else
136
/*
137
 * Every `GC_full_freq + 1` collection is a full collection, whether we
138
 * need it or not.
139
 */
140
int GC_full_freq = 19;
141
#endif
142

143
#ifdef THREAD_LOCAL_ALLOC
144
GC_INNER GC_bool GC_world_stopped = FALSE;
145
#endif
146

147
STATIC GC_bool GC_disable_automatic_collection = FALSE;
148

149
GC_API void GC_CALL
150
GC_set_disable_automatic_collection(int value)
2✔
151
{
152
  LOCK();
2✔
153
  GC_disable_automatic_collection = (GC_bool)value;
2✔
154
  UNLOCK();
2✔
155
}
2✔
156

157
GC_API int GC_CALL
158
GC_get_disable_automatic_collection(void)
2✔
159
{
160
  int value;
161

162
  READER_LOCK();
2✔
163
  value = (int)GC_disable_automatic_collection;
2✔
164
  READER_UNLOCK();
2✔
165
  return value;
2✔
166
}
167

168
/*
169
 * The version macros are now defined in `gc_version.h` file, which is
170
 * included by `gc.h` file, which, in turn, is included by `gc_priv.h` file.
171
 */
172
#ifndef GC_NO_VERSION_VAR
173
EXTERN_C_BEGIN
174
extern const GC_VERSION_VAL_T GC_version;
175
EXTERN_C_END
176

177
const GC_VERSION_VAL_T GC_version = ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16)
178
                                    | (GC_VERSION_MINOR << 8)
179
                                    | GC_VERSION_MICRO;
180
#endif
181

182
GC_API GC_VERSION_VAL_T GC_CALL
183
GC_get_version(void)
2✔
184
{
185
  return ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8)
2✔
186
         | GC_VERSION_MICRO;
187
}
188

189
GC_API int GC_CALL
190
GC_get_dont_add_byte_at_end(void)
42✔
191
{
192
#if MAX_EXTRA_BYTES > 0
193
  /* This is meaningful only if `GC_all_interior_pointers`. */
194
  return 0;
42✔
195
#else
196
  return 1;
197
#endif
198
}
199

200
/* Some more variables. */
201

202
#ifdef GC_DONT_EXPAND
203
int GC_dont_expand = TRUE;
204
#else
205
int GC_dont_expand = FALSE;
206
#endif
207

208
#if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
209
word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /*< must be positive */
210
#else
211
word GC_free_space_divisor = 3;
212
#endif
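
GC_free_space_divisor is also adjustable through the public API. A hedged tuning sketch (the value 6 is arbitrary): a larger divisor triggers collections after less allocation, keeping the heap smaller at the cost of extra CPU time.

#include <gc.h>

static void tune_gc_for_small_footprint(void)
{
  /* Default is 3; doubling it roughly halves the allocation allowed */
  /* between collections (see min_bytes_allocd() below).             */
  GC_set_free_space_divisor(6);
}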
213

214
GC_INNER int GC_CALLBACK
215
GC_never_stop_func(void)
7,692,810✔
216
{
217
  return FALSE;
7,692,810✔
218
}
219

220
#if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
221
/*
222
 * We try to keep pause times from exceeding this by much.
223
 * In milliseconds.
224
 */
225
unsigned long GC_time_limit = GC_TIME_LIMIT;
226
#elif defined(PARALLEL_MARK)
227
/*
228
 * The parallel marker cannot be interrupted for now, so the time limit
229
 * is absent by default.
230
 */
231
unsigned long GC_time_limit = GC_TIME_UNLIMITED;
232
#else
233
unsigned long GC_time_limit = 15;
234
#endif
235

236
#ifndef NO_CLOCK
237
/*
238
 * The nanoseconds add-on to `GC_time_limit` value.
239
 * Not updated by `GC_set_time_limit()`.
240
 * Ignored if the value of `GC_time_limit` is `GC_TIME_UNLIMITED`.
241
 */
242
STATIC unsigned long GC_time_lim_nsec = 0;
243

244
#  define TV_NSEC_LIMIT (1000UL * 1000) /*< amount of nanoseconds in 1 ms */
245

246
GC_API void GC_CALL
247
GC_set_time_limit_tv(struct GC_timeval_s tv)
2✔
248
{
249
  GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
2✔
250
  GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
2✔
251
  GC_time_limit = tv.tv_ms;
2✔
252
  GC_time_lim_nsec = tv.tv_nsec;
2✔
253
}
2✔
254

255
GC_API struct GC_timeval_s GC_CALL
256
GC_get_time_limit_tv(void)
2✔
257
{
258
  struct GC_timeval_s tv;
259

260
  tv.tv_ms = GC_time_limit;
2✔
261
  tv.tv_nsec = GC_time_lim_nsec;
2✔
262
  return tv;
2✔
263
}
264
#endif /* !NO_CLOCK */
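
For instance, a client that wants incremental pauses of roughly 5.5 ms could set the limit as follows (a sketch using the API defined just above; effective only in incremental mode and in builds with clock support):

#include <gc.h>

static void set_pause_target(void)
{
  struct GC_timeval_s tv;

  tv.tv_ms = 5;          /* 5 ms ...                        */
  tv.tv_nsec = 500000UL; /* ... plus 500000 ns, i.e. 5.5 ms */
  GC_set_time_limit_tv(tv);
}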
265

266
/* Note: accessed holding the allocator lock. */
267
STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
268

269
GC_API void GC_CALL
270
GC_set_stop_func(GC_stop_func stop_func)
2✔
271
{
272
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
2✔
273
  LOCK();
2✔
274
  GC_default_stop_func = stop_func;
2✔
275
  UNLOCK();
2✔
276
}
2✔
277

278
GC_API GC_stop_func GC_CALL
279
GC_get_stop_func(void)
2✔
280
{
281
  GC_stop_func stop_func;
282

283
  READER_LOCK();
2✔
284
  stop_func = GC_default_stop_func;
2✔
285
  READER_UNLOCK();
2✔
286
  return stop_func;
2✔
287
}
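
A client-supplied stop function lets a world-stopped collection be abandoned once it runs too long. A minimal sketch (the one-second deadline is an arbitrary example, not a collector default):

#include <time.h>
#include <gc.h>

static time_t collect_deadline;

/* Returning nonzero asks the collector to abandon the attempt. */
static int GC_CALLBACK deadline_stop_func(void)
{
  return time(NULL) > collect_deadline;
}

static void bounded_full_collect(void)
{
  collect_deadline = time(NULL) + 1;
  /* A zero result means the collection was abandoned; the heap */
  /* remains consistent either way.                             */
  (void)GC_try_to_collect(deadline_stop_func);
}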
288

289
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
290
#  define GC_timeout_stop_func GC_default_stop_func
291
#else
292
STATIC int GC_CALLBACK
293
GC_timeout_stop_func(void)
6,667,261✔
294
{
295
  CLOCK_TYPE current_time;
296
  static unsigned count = 0;
297
  unsigned long time_diff, nsec_diff;
298

299
  GC_ASSERT(I_HOLD_LOCK());
6,667,261✔
300
  if (GC_default_stop_func())
6,667,261✔
301
    return TRUE;
6,667,261✔
302

303
  if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
6,667,261✔
304
    return FALSE;
6,667,261✔
305

306
  GET_TIME(current_time);
×
307
  time_diff = MS_TIME_DIFF(current_time, GC_start_time);
×
308
  nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
×
309
#  if defined(CPPCHECK)
310
  GC_noop1_ptr(&nsec_diff);
311
#  endif
312
  if (time_diff >= GC_time_limit
×
313
      && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
×
314
    GC_COND_LOG_PRINTF(
×
315
        "Abandoning stopped marking after %lu ms %lu ns (attempt %d)\n",
316
        time_diff, nsec_diff, GC_n_attempts);
317
    return TRUE;
×
318
  }
319

320
  return FALSE;
×
321
}
322
#endif /* !GC_DISABLE_INCREMENTAL */
323

324
#ifdef THREADS
325
GC_INNER word GC_total_stacksize = 0;
326
#endif
327

328
/* The lowest value returned by `min_bytes_allocd()`. */
329
static size_t min_bytes_allocd_minimum = 1;
330

331
GC_API void GC_CALL
332
GC_set_min_bytes_allocd(size_t value)
2✔
333
{
334
  GC_ASSERT(value > 0);
2✔
335
  min_bytes_allocd_minimum = value;
2✔
336
}
2✔
337

338
GC_API size_t GC_CALL
339
GC_get_min_bytes_allocd(void)
2✔
340
{
341
  return min_bytes_allocd_minimum;
2✔
342
}
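
On very small heaps the computed minimum (see min_bytes_allocd() below) can make collections too frequent; the setter above lets a client raise the floor. A sketch with an arbitrary 128 KiB value:

#include <gc.h>

static void raise_collection_floor(void)
{
  /* Require at least 128 KiB of allocation between collections. */
  GC_set_min_bytes_allocd(128 * 1024);
}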
343

344
/*
345
 * Return the minimum number of bytes that must be allocated between
346
 * collections to amortize the cost of the latter.  Should be nonzero.
347
 */
348
static word
349
min_bytes_allocd(void)
39,767✔
350
{
351
  word result;
352
  word stack_size;
353
  /*
354
   * Total size of roots; it includes double the stack size, since the stack
355
   * is expensive to scan.
356
   */
357
  word total_root_size;
358
  /* Estimate of memory to be scanned during normal collection. */
359
  word scan_size;
360

361
  GC_ASSERT(I_HOLD_LOCK());
39,767✔
362
#ifdef THREADS
363
  if (GC_need_to_lock) {
39,767✔
364
    /* We are multi-threaded... */
365
    stack_size = GC_total_stacksize;
15,379✔
366
    /*
367
     * For now, we just use the value computed during the latest garbage
368
     * collection.
369
     */
370
#  ifdef DEBUG_THREADS
371
    GC_log_printf("Total stacks size: %lu\n", (unsigned long)stack_size);
372
#  endif
373
  } else
374
#endif
375
  /* else */ {
376
#ifdef STACK_NOT_SCANNED
377
    stack_size = 0;
378
#elif defined(STACK_GROWS_UP)
379
    stack_size = (word)(GC_approx_sp() - GC_stackbottom);
380
#else
381
    stack_size = (word)(GC_stackbottom - GC_approx_sp());
24,388✔
382
#endif
383
  }
384

385
  total_root_size = 2 * stack_size + GC_root_size;
39,767✔
386
  scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4 + total_root_size;
39,767✔
387
  result = scan_size / GC_free_space_divisor;
39,767✔
388
  if (GC_incremental) {
39,767✔
389
    result /= 2;
31,123✔
390
  }
391
  return result > min_bytes_allocd_minimum ? result : min_bytes_allocd_minimum;
39,767✔
392
}
393

394
/*
395
 * Return the number of bytes allocated, adjusted for explicit storage
396
 * management, etc.  This number is used in deciding when to trigger
397
 * collections.
398
 */
399
STATIC word
400
GC_adj_bytes_allocd(void)
6,903,351✔
401
{
402
  GC_signed_word result;
403
  GC_signed_word expl_managed = (GC_signed_word)GC_non_gc_bytes
6,903,351✔
404
                                - (GC_signed_word)GC_non_gc_bytes_at_gc;
6,903,351✔
405

406
  /*
407
   * Do not count what was explicitly freed, or newly allocated for
408
   * explicit management.  Note that deallocating an explicitly managed
409
   * object should not alter result, assuming the client is playing by
410
   * the rules.
411
   */
412
  result = (GC_signed_word)GC_bytes_allocd + (GC_signed_word)GC_bytes_dropped
6,903,351✔
413
           - (GC_signed_word)GC_bytes_freed
6,903,351✔
414
           + (GC_signed_word)GC_finalizer_bytes_freed - expl_managed;
6,903,351✔
415
  if (result > (GC_signed_word)GC_bytes_allocd) {
6,903,351✔
416
    /* Probably a client bug or unfortunate scheduling. */
417
    result = (GC_signed_word)GC_bytes_allocd;
486,917✔
418
  }
419
  /*
420
   * We count objects enqueued for finalization as though they had been
421
   * reallocated this round.  Finalization is visible to user.
422
   * And if we do not count this, we have stability problems for programs
423
   * that finalize all objects.
424
   */
425
  result += (GC_signed_word)GC_bytes_finalized;
6,903,351✔
426
  if (result < (GC_signed_word)(GC_bytes_allocd >> 3)) {
6,903,351✔
427
    /*
428
     * Always count at least 1/8 of the allocations.  We do not want to
429
     * collect too infrequently, since that would inhibit coalescing of
430
     * free storage blocks.  This also makes us partially robust against
431
     * client bugs.
432
     */
433
    result = (GC_signed_word)(GC_bytes_allocd >> 3);
5,924✔
434
  }
435
  return (word)result;
6,903,351✔
436
}
437

438
/*
439
 * Clear up a few frames worth of garbage left at the top of the stack.
440
 * This is used to prevent us from accidentally treating garbage left
441
 * on the stack by other parts of the collector as roots.
442
 * This differs from the code in `misc.c` file, which actually tries
443
 * to keep the stack clear of long-lived, client-generated garbage.
444
 */
445
STATIC void
446
GC_clear_a_few_frames(void)
25,823✔
447
{
448
#ifndef CLEAR_STACK_NPTRS
449
#  define CLEAR_STACK_NPTRS 64 /*< pointers */
450
#endif
451
  volatile ptr_t frames[CLEAR_STACK_NPTRS];
452

453
  BZERO(CAST_AWAY_VOLATILE_PVOID(frames), sizeof(frames));
25,823✔
454
}
25,823✔
455

456
GC_API void GC_CALL
457
GC_start_incremental_collection(void)
×
458
{
459
#ifndef GC_DISABLE_INCREMENTAL
460
  LOCK();
×
461
  if (GC_incremental) {
×
462
    GC_should_start_incremental_collection = TRUE;
×
463
    if (!GC_dont_gc) {
×
464
      GC_collect_a_little_inner(1);
×
465
    }
466
  }
467
  UNLOCK();
×
468
#endif
469
}
×
470

471
GC_INNER GC_bool
472
GC_should_collect(void)
6,907,495✔
473
{
474
  static word last_min_bytes_allocd, last_gc_no;
475

476
  GC_ASSERT(I_HOLD_LOCK());
6,907,495✔
477
  if (last_gc_no != GC_gc_no) {
6,907,495✔
478
    last_min_bytes_allocd = min_bytes_allocd();
25,409✔
479
    last_gc_no = GC_gc_no;
25,409✔
480
  }
481
#ifndef GC_DISABLE_INCREMENTAL
482
  if (GC_should_start_incremental_collection) {
6,907,495✔
483
    GC_should_start_incremental_collection = FALSE;
×
484
    return TRUE;
×
485
  }
486
#endif
487
  if (GC_disable_automatic_collection)
6,907,495✔
488
    return FALSE;
×
489

490
  if (GC_last_heap_growth_gc_no == GC_gc_no) {
6,907,495✔
491
    /* Avoid expanding past limits used by black-listing. */
492
    return TRUE;
4,144✔
493
  }
494

495
  return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
6,903,351✔
496
}
497

498
/*
499
 * Called at start of full collections.  Not called if zero.
500
 * Called with the allocator lock held.  Not used by the collector itself.
501
 */
502
/* `STATIC` */ GC_start_callback_proc GC_start_call_back = 0;
503

504
GC_API void GC_CALL
505
GC_set_start_callback(GC_start_callback_proc fn)
2✔
506
{
507
  LOCK();
2✔
508
  GC_start_call_back = fn;
2✔
509
  UNLOCK();
2✔
510
}
2✔
511

512
GC_API GC_start_callback_proc GC_CALL
513
GC_get_start_callback(void)
2✔
514
{
515
  GC_start_callback_proc fn;
516

517
  READER_LOCK();
2✔
518
  fn = GC_start_call_back;
2✔
519
  READER_UNLOCK();
2✔
520
  return fn;
2✔
521
}
522

523
GC_INLINE void
524
GC_notify_full_gc(void)
11,996✔
525
{
526
  if (GC_start_call_back != 0) {
11,996✔
527
    (*GC_start_call_back)();
×
528
  }
529
}
11,996✔
530

531
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
532
STATIC void GC_finish_collection(void);
533

534
/*
535
 * Initiate a garbage collection if appropriate.  Choose judiciously
536
 * between partial, full, and stop-world collections.
537
 */
538
STATIC void
539
GC_maybe_gc(void)
6,420,328✔
540
{
541
  GC_ASSERT(I_HOLD_LOCK());
6,420,328✔
542
  ASSERT_CANCEL_DISABLED();
6,420,328✔
543
  if (!GC_should_collect())
6,420,328✔
544
    return;
6,405,490✔
545

546
  if (!GC_incremental) {
14,838✔
547
    GC_gcollect_inner();
×
548
    return;
×
549
  }
550

551
  GC_ASSERT(!GC_collection_in_progress());
14,838✔
552
#ifdef PARALLEL_MARK
553
  if (GC_parallel)
14,838✔
554
    GC_wait_for_reclaim();
6,830✔
555
#endif
556
  if (GC_need_full_gc || GC_n_partial_gcs >= GC_full_freq) {
14,838✔
557
    GC_COND_LOG_PRINTF(
1,011✔
558
        "***>Full mark for collection #%lu after %lu bytes allocated\n",
559
        (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
560
    GC_notify_full_gc();
1,011✔
561
    ENTER_GC();
1,011✔
562
    GC_promote_black_lists();
1,011✔
563
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
1,011✔
564
    GC_clear_marks();
1,011✔
565
    EXIT_GC();
1,011✔
566
    GC_n_partial_gcs = 0;
1,011✔
567
    GC_is_full_gc = TRUE;
1,011✔
568
  } else {
569
    GC_n_partial_gcs++;
13,827✔
570
  }
571

572
  /*
573
   * Try to mark with the world stopped.  If we run out of time, then this
574
   * turns into an incremental marking.
575
   */
576
#ifndef NO_CLOCK
577
  if (GC_time_limit != GC_TIME_UNLIMITED)
14,838✔
578
    GET_TIME(GC_start_time);
×
579
#endif
580
  if (GC_stopped_mark(GC_timeout_stop_func)) {
14,838✔
581
    SAVE_CALLERS_TO_LAST_STACK();
582
    GC_finish_collection();
14,838✔
583
  } else if (!GC_is_full_gc) {
×
584
    /* Count this as the first attempt. */
585
    GC_n_attempts++;
×
586
  }
587
}
588

589
STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
590

591
GC_API void GC_CALL
592
GC_set_on_collection_event(GC_on_collection_event_proc fn)
2✔
593
{
594
  /* `fn` may be 0 (means no event notifier). */
595
  LOCK();
2✔
596
  GC_on_collection_event = fn;
2✔
597
  UNLOCK();
2✔
598
}
2✔
599

600
GC_API GC_on_collection_event_proc GC_CALL
601
GC_get_on_collection_event(void)
2✔
602
{
603
  GC_on_collection_event_proc fn;
604

605
  READER_LOCK();
2✔
606
  fn = GC_on_collection_event;
2✔
607
  READER_UNLOCK();
2✔
608
  return fn;
2✔
609
}
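
A sketch of an event hook registered through the setter above; the callback may run with the allocator lock held (and with the world stopped), so it only bumps counters here:

#include <gc.h>

static unsigned long gc_cycles_started, gc_cycles_finished;

static void GC_CALLBACK count_gc_events(GC_EventType t)
{
  if (GC_EVENT_START == t)
    gc_cycles_started++;
  else if (GC_EVENT_END == t)
    gc_cycles_finished++;
}

static void install_gc_counters(void)
{
  GC_set_on_collection_event(count_gc_events);
}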
610

611
GC_INNER GC_bool
612
GC_try_to_collect_inner(GC_stop_func stop_func)
11,002✔
613
{
614
#ifndef NO_CLOCK
615
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
11,002✔
616
  GC_bool start_time_valid;
617
#endif
618

619
  ASSERT_CANCEL_DISABLED();
11,002✔
620
  GC_ASSERT(I_HOLD_LOCK());
11,002✔
621
  GC_ASSERT(GC_is_initialized);
11,002✔
622
  if (GC_dont_gc || (*stop_func)())
11,002✔
623
    return FALSE;
11,002✔
624
  if (GC_on_collection_event)
10,985✔
625
    GC_on_collection_event(GC_EVENT_START);
×
626
  if (GC_incremental && GC_collection_in_progress()) {
10,985✔
627
    GC_COND_LOG_PRINTF(
×
628
        "GC_try_to_collect_inner: finishing collection in progress\n");
629
    /* Just finish collection already in progress. */
630
    do {
631
      if ((*stop_func)()) {
×
632
        /* TODO: Notify `GC_EVENT_ABANDON`. */
633
        return FALSE;
×
634
      }
635
      GC_collect_a_little_inner(1);
×
636
    } while (GC_collection_in_progress());
×
637
  }
638
  GC_notify_full_gc();
10,985✔
639
#ifndef NO_CLOCK
640
  start_time_valid = FALSE;
10,985✔
641
  if ((GC_print_stats | (int)GC_measure_performance) != 0) {
10,985✔
642
    if (GC_print_stats)
2,287✔
643
      GC_log_printf("Initiating full world-stop collection!\n");
×
644
    start_time_valid = TRUE;
2,287✔
645
    GET_TIME(start_time);
2,287✔
646
  }
647
#endif
648
  GC_promote_black_lists();
10,985✔
649
  /*
650
   * Make sure all blocks have been reclaimed, so sweep routines do not
651
   * see cleared mark bits.  If we are guaranteed to finish, then this
652
   * is unnecessary.  In the find-leak case, we have to finish to
653
   * guarantee that previously unmarked objects are not reported as leaks.
654
   */
655
#ifdef PARALLEL_MARK
656
  if (GC_parallel)
10,985✔
657
    GC_wait_for_reclaim();
3,989✔
658
#endif
659
  ENTER_GC();
10,985✔
660
  if ((GC_find_leak_inner || stop_func != GC_never_stop_func)
10,985✔
661
      && !GC_reclaim_all(stop_func, FALSE)) {
66✔
662
    /* Aborted.  So far everything is still consistent. */
663
    EXIT_GC();
×
664
    /* TODO: Notify `GC_EVENT_ABANDON`. */
665
    return FALSE;
×
666
  }
667
  GC_invalidate_mark_state(); /*< flush mark stack */
10,985✔
668
  GC_clear_marks();
10,985✔
669
  SAVE_CALLERS_TO_LAST_STACK();
670
  GC_is_full_gc = TRUE;
10,985✔
671
  EXIT_GC();
10,985✔
672
  if (!GC_stopped_mark(stop_func)) {
10,985✔
673
    if (!GC_incremental) {
×
674
      /*
675
       * We are partially done and have no way to complete or use
676
       * current work.  Reestablish invariants as cheaply as possible.
677
       */
678
      GC_invalidate_mark_state();
×
679
      GC_unpromote_black_lists();
×
680
    } else {
681
      /*
682
       * We claim the world is already (or still) consistent.
683
       * We will finish incrementally.
684
       */
685
    }
686
    /* TODO: Notify `GC_EVENT_ABANDON`. */
687
    return FALSE;
×
688
  }
689
  GC_finish_collection();
10,985✔
690
#ifndef NO_CLOCK
691
  if (start_time_valid) {
10,985✔
692
    CLOCK_TYPE current_time;
693
    unsigned long time_diff, ns_frac_diff;
694

695
    GET_TIME(current_time);
2,287✔
696
    time_diff = MS_TIME_DIFF(current_time, start_time);
2,287✔
697
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
2,287✔
698
    if (GC_measure_performance) {
2,287✔
699
      GC_full_gc_total_time += time_diff; /*< may wrap */
2,287✔
700
      GC_full_gc_total_ns_frac += (unsigned32)ns_frac_diff;
2,287✔
701
      if (GC_full_gc_total_ns_frac >= (unsigned32)1000000UL) {
2,287✔
702
        /* Overflow of the nanoseconds part. */
703
        GC_full_gc_total_ns_frac -= (unsigned32)1000000UL;
1,125✔
704
        GC_full_gc_total_time++;
1,125✔
705
      }
706
    }
707
    if (GC_print_stats)
2,287✔
708
      GC_log_printf("Complete collection took %lu ms %lu ns\n", time_diff,
×
709
                    ns_frac_diff);
710
  }
711
#endif
712
  if (GC_on_collection_event)
10,985✔
713
    GC_on_collection_event(GC_EVENT_END);
×
714
  return TRUE;
10,985✔
715
}
716

717
/* The default value of `GC_rate`. */
718
#ifndef GC_RATE
719
#  define GC_RATE 10
720
#endif
721

722
/*
723
 * When `GC_collect_a_little_inner()` performs `n_blocks` units of garbage
724
 * collection work, a unit is intended to touch roughly `GC_rate` pages.
725
 * (But, every once in a while, we do more than that.)  This needs to be
726
 * a fairly large number with our current incremental collection strategy,
727
 * since otherwise we allocate too much during garbage collection, and
728
 * the cleanup gets expensive.
729
 */
730
STATIC unsigned GC_rate = GC_RATE;
731

732
GC_API void GC_CALL
733
GC_set_rate(int value)
2✔
734
{
735
  GC_ASSERT(value > 0);
2✔
736
  GC_rate = (unsigned)value;
2✔
737
}
2✔
738

739
GC_API int GC_CALL
740
GC_get_rate(void)
2✔
741
{
742
  return (int)GC_rate;
2✔
743
}
744

745
/* The default maximum number of prior attempts at world stop marking. */
746
#ifndef MAX_PRIOR_ATTEMPTS
747
#  define MAX_PRIOR_ATTEMPTS 3
748
#endif
749

750
/*
751
 * The maximum number of prior attempts at world stop marking.
752
 * A value of 1 means that we finish the second time, no matter how long
753
 * it takes.  Does not count the initial root scan for a full collection.
754
 */
755
static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
756

757
GC_API void GC_CALL
758
GC_set_max_prior_attempts(int value)
2✔
759
{
760
  GC_ASSERT(value >= 0);
2✔
761
  max_prior_attempts = value;
2✔
762
}
2✔
763

764
GC_API int GC_CALL
765
GC_get_max_prior_attempts(void)
2✔
766
{
767
  return max_prior_attempts;
2✔
768
}
769

770
GC_INNER void
771
GC_collect_a_little_inner(size_t n_blocks)
6,443,887✔
772
{
773
  IF_CANCEL(int cancel_state;)
774

775
  GC_ASSERT(I_HOLD_LOCK());
6,443,887✔
776
  GC_ASSERT(GC_is_initialized);
6,443,887✔
777
  DISABLE_CANCEL(cancel_state);
6,443,887✔
778
  if (GC_incremental && GC_collection_in_progress()) {
6,443,887✔
779
    size_t i;
780
    size_t max_deficit = GC_rate * n_blocks;
×
781

782
    ENTER_GC();
×
783
#ifdef PARALLEL_MARK
784
    if (GC_time_limit != GC_TIME_UNLIMITED)
×
785
      GC_parallel_mark_disabled = TRUE;
×
786
#endif
787
    for (i = GC_mark_deficit; i < max_deficit; i++) {
×
788
      if (GC_mark_some(NULL))
×
789
        break;
×
790
    }
791
#ifdef PARALLEL_MARK
792
    GC_parallel_mark_disabled = FALSE;
×
793
#endif
794
    EXIT_GC();
×
795

796
    if (i < max_deficit && !GC_dont_gc) {
×
797
      GC_ASSERT(!GC_collection_in_progress());
×
798
      /* Need to follow up with a full collection. */
799
      SAVE_CALLERS_TO_LAST_STACK();
800
#ifdef PARALLEL_MARK
801
      if (GC_parallel)
×
802
        GC_wait_for_reclaim();
×
803
#endif
804
#ifndef NO_CLOCK
805
      if (GC_time_limit != GC_TIME_UNLIMITED
×
806
          && GC_n_attempts < max_prior_attempts)
×
807
        GET_TIME(GC_start_time);
×
808
#endif
809
      if (GC_stopped_mark(GC_n_attempts < max_prior_attempts
×
810
                              ? GC_timeout_stop_func
811
                              : GC_never_stop_func)) {
812
        GC_finish_collection();
×
813
      } else {
814
        GC_n_attempts++;
×
815
      }
816
    }
817
    if (GC_mark_deficit > 0) {
×
818
      GC_mark_deficit
819
          = GC_mark_deficit > max_deficit ? GC_mark_deficit - max_deficit : 0;
×
820
    }
821
  } else if (!GC_dont_gc) {
6,443,887✔
822
    GC_maybe_gc();
6,420,328✔
823
  }
824
  RESTORE_CANCEL(cancel_state);
6,443,887✔
825
}
6,443,887✔
826

827
#if !defined(NO_FIND_LEAK) || !defined(SHORT_DBG_HDRS)
828
GC_INNER void (*GC_check_heap)(void) = 0;
829
GC_INNER void (*GC_print_all_smashed)(void) = 0;
830
#endif
831

832
GC_API int GC_CALL
833
GC_collect_a_little(void)
2,635,467✔
834
{
835
  int result;
836

837
  if (UNLIKELY(!GC_is_initialized))
2,635,467✔
838
    GC_init();
×
839
  LOCK();
2,635,467✔
840
  /*
841
   * Note: if the collection is in progress, this may do marking (not
842
   * stopping the world) even if garbage collection is disabled.
843
   */
844
  GC_collect_a_little_inner(1);
2,635,467✔
845
  result = (int)GC_collection_in_progress();
2,635,467✔
846
  UNLOCK();
2,635,467✔
847
  if (GC_debugging_started && !result)
2,635,467✔
848
    GC_print_all_smashed();
×
849
  return result;
2,635,467✔
850
}
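
A typical client use of GC_collect_a_little is to spend otherwise idle time on collection work. A minimal sketch (the work budget of 10 calls is arbitrary), assuming incremental mode was requested via GC_enable_incremental:

#include <gc.h>

static void gc_work_during_idle_time(void)
{
  int budget = 10;

  /* Each call performs a small amount of marking/sweeping work and */
  /* returns nonzero while the current collection is unfinished.    */
  while (GC_collect_a_little() && --budget > 0) {
    /* keep going until done or out of budget */
  }
}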
851

852
#ifdef THREADS
853
GC_API void GC_CALL
854
GC_stop_world_external(void)
2✔
855
{
856
  GC_ASSERT(GC_is_initialized);
2✔
857
  LOCK();
2✔
858
#  ifdef THREAD_LOCAL_ALLOC
859
  GC_ASSERT(!GC_world_stopped);
2✔
860
#  endif
861
  ENTER_GC();
2✔
862
  STOP_WORLD();
2✔
863
#  ifdef THREAD_LOCAL_ALLOC
864
  GC_world_stopped = TRUE;
2✔
865
#  endif
866
}
2✔
867

868
GC_API void GC_CALL
869
GC_start_world_external(void)
2✔
870
{
871
#  ifdef THREAD_LOCAL_ALLOC
872
  GC_ASSERT(GC_world_stopped);
2✔
873
  GC_world_stopped = FALSE;
2✔
874
#  else
875
  GC_ASSERT(GC_is_initialized);
876
#  endif
877
  START_WORLD();
2✔
878
  EXIT_GC();
2✔
879
  UNLOCK();
2✔
880
}
2✔
881
#endif /* THREADS */
882

883
#ifdef USE_MUNMAP
884
#  ifndef MUNMAP_THRESHOLD
885
#    define MUNMAP_THRESHOLD 7
886
#  endif
887
GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;
888

889
#  define IF_USE_MUNMAP(x) x
890
#  define COMMA_IF_USE_MUNMAP(x) /* comma */ , x
891
#else
892
#  define IF_USE_MUNMAP(x)
893
#  define COMMA_IF_USE_MUNMAP(x)
894
#endif /* !USE_MUNMAP */
895

896
/*
897
 * We stop the world and mark from all roots.  If `stop_func()` ever
898
 * returns `TRUE`, we may fail and return `FALSE`.  Increment `GC_gc_no`
899
 * if we succeed.
900
 */
901
STATIC GC_bool
902
GC_stopped_mark(GC_stop_func stop_func)
25,823✔
903
{
904
  ptr_t cold_gc_frame = GC_approx_sp();
25,823✔
905
  unsigned abandoned_at;
906
#ifndef NO_CLOCK
907
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
25,823✔
908
  GC_bool start_time_valid = FALSE;
25,823✔
909
#endif
910

911
  GC_ASSERT(I_HOLD_LOCK());
25,823✔
912
  GC_ASSERT(GC_is_initialized);
25,823✔
913
  ENTER_GC();
25,823✔
914
#if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
915
  GC_add_current_malloc_heap();
916
#endif
917
#if defined(REGISTER_LIBRARIES_EARLY)
918
  GC_cond_register_dynamic_libraries();
25,823✔
919
#endif
920

921
#if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
922
  GC_process_togglerefs();
25,823✔
923
#endif
924

925
  /* Output blank line for convenience here. */
926
  GC_COND_LOG_PRINTF(
25,823✔
927
      "\n--> Marking for collection #%lu after %lu allocated bytes\n",
928
      (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
929
#ifndef NO_CLOCK
930
  if (GC_PRINT_STATS_FLAG || GC_measure_performance) {
25,823✔
931
    GET_TIME(start_time);
8,990✔
932
    start_time_valid = TRUE;
8,990✔
933
  }
934
#endif
935
#ifdef THREADS
936
  if (GC_on_collection_event)
25,823✔
937
    GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
×
938
#endif
939
  STOP_WORLD();
25,823✔
940
#ifdef THREADS
941
  if (GC_on_collection_event)
25,823✔
942
    GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
×
943
#  ifdef THREAD_LOCAL_ALLOC
944
  GC_world_stopped = TRUE;
25,823✔
945
#  elif defined(CPPCHECK)
946
  /* Workaround a warning about adjacent same `if` condition. */
947
  (void)0;
948
#  endif
949
#endif
950

951
#ifdef MAKE_BACK_GRAPH
952
  if (GC_print_back_height) {
953
    GC_build_back_graph();
954
  }
955
#endif
956

957
  /* Notify about marking from all roots. */
958
  if (GC_on_collection_event)
25,823✔
959
    GC_on_collection_event(GC_EVENT_MARK_START);
×
960

961
  /* Minimize junk left in my registers and on the stack. */
962
  GC_clear_a_few_frames();
25,823✔
963
  GC_noop6(0, 0, 0, 0, 0, 0);
25,823✔
964

965
  GC_initiate_gc();
25,823✔
966
#ifdef PARALLEL_MARK
967
  if (stop_func != GC_never_stop_func)
25,823✔
968
    GC_parallel_mark_disabled = TRUE;
14,838✔
969
#endif
970
  for (abandoned_at = 1; !(*stop_func)(); abandoned_at++) {
7,681,187✔
971
    if (GC_mark_some(cold_gc_frame)) {
7,681,187✔
972
#ifdef PARALLEL_MARK
973
      if (GC_parallel && GC_parallel_mark_disabled) {
25,823✔
974
        GC_COND_LOG_PRINTF("Stopped marking done after %u iterations"
6,830✔
975
                           " with disabled parallel marker\n",
976
                           abandoned_at - 1);
977
      }
978
#endif
979
      abandoned_at = 0;
25,823✔
980
      break;
25,823✔
981
    }
982
  }
983
#ifdef PARALLEL_MARK
984
  GC_parallel_mark_disabled = FALSE;
25,823✔
985
#endif
986

987
  if (abandoned_at > 0) {
25,823✔
988
    /* Give the mutator a chance. */
989
    GC_mark_deficit = abandoned_at - 1;
×
990
    /* TODO: Notify `GC_EVENT_MARK_ABANDON`. */
991
  } else {
992
    GC_gc_no++;
25,823✔
993
    /* Check all debugged objects for consistency. */
994
    if (GC_debugging_started)
25,823✔
995
      GC_check_heap();
249✔
996
    if (GC_on_collection_event)
25,823✔
997
      GC_on_collection_event(GC_EVENT_MARK_END);
×
998
  }
999

1000
#ifdef THREADS
1001
  if (GC_on_collection_event)
25,823✔
1002
    GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
1003
#endif
1004
#ifdef THREAD_LOCAL_ALLOC
1005
  GC_world_stopped = FALSE;
25,823✔
1006
#endif
1007
  START_WORLD();
25,823✔
1008
#ifdef THREADS
1009
  if (GC_on_collection_event)
25,823✔
1010
    GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
1011
#endif
1012

1013
#ifndef NO_CLOCK
1014
  if (start_time_valid) {
25,823✔
1015
    CLOCK_TYPE current_time;
1016
    unsigned long time_diff, ns_frac_diff;
1017

1018
    /* TODO: Avoid code duplication from `GC_try_to_collect_inner`. */
1019
    GET_TIME(current_time);
8,990✔
1020
    time_diff = MS_TIME_DIFF(current_time, start_time);
8,990✔
1021
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
8,990✔
1022
    if (GC_measure_performance) {
8,990✔
1023
      GC_stopped_mark_total_time += time_diff; /*< may wrap */
8,990✔
1024
      GC_stopped_mark_total_ns_frac += (unsigned32)ns_frac_diff;
8,990✔
1025
      if (GC_stopped_mark_total_ns_frac >= (unsigned32)1000000UL) {
8,990✔
1026
        GC_stopped_mark_total_ns_frac -= (unsigned32)1000000UL;
3,741✔
1027
        GC_stopped_mark_total_time++;
3,741✔
1028
      }
1029
    }
1030

1031
    if (GC_PRINT_STATS_FLAG || GC_measure_performance) {
8,990✔
1032
      unsigned total_time = GC_world_stopped_total_time;
8,990✔
1033
      unsigned divisor = GC_world_stopped_total_divisor;
8,990✔
1034

1035
      /* Compute new world-stop delay total time. */
1036
      if (total_time > (((unsigned)-1) >> 1)
8,990✔
1037
          || divisor >= MAX_TOTAL_TIME_DIVISOR) {
8,990✔
1038
        /* Halve values if overflow occurs. */
1039
        total_time >>= 1;
17✔
1040
        divisor >>= 1;
17✔
1041
      }
1042
      total_time += time_diff < (((unsigned)-1) >> 1) ? (unsigned)time_diff
8,990✔
1043
                                                      : ((unsigned)-1) >> 1;
8,990✔
1044
      /* Update old `GC_world_stopped_total_time` and its divisor. */
1045
      GC_world_stopped_total_time = total_time;
8,990✔
1046
      GC_world_stopped_total_divisor = ++divisor;
8,990✔
1047
      if (GC_PRINT_STATS_FLAG && 0 == abandoned_at) {
8,990✔
1048
        GC_ASSERT(divisor != 0);
×
1049
        GC_log_printf(
×
1050
            "World-stopped marking took %lu ms %lu ns (%u ms in average)\n",
1051
            time_diff, ns_frac_diff, total_time / divisor);
1052
      }
1053
    }
1054
  }
1055
#endif
1056

1057
  EXIT_GC();
25,823✔
1058
  if (0 == abandoned_at)
25,823✔
1059
    return TRUE;
25,823✔
1060
  GC_COND_LOG_PRINTF("Abandoned stopped marking after %u iterations\n",
×
1061
                     abandoned_at - 1);
1062
  return FALSE;
×
1063
}
1064

1065
GC_INNER void
1066
GC_set_fl_marks(ptr_t q)
1,105,323✔
1067
{
1068
#ifdef GC_ASSERTIONS
1069
  ptr_t q2;
1070
#endif
1071
  struct hblk *h = HBLKPTR(q);
1,105,323✔
1072
  const struct hblk *last_h = h;
1,105,323✔
1073
  hdr *hhdr;
1074
#ifdef MARK_BIT_PER_OBJ
1075
  size_t sz;
1076
#endif
1077

1078
  GC_ASSERT(q != NULL);
1,105,323✔
1079
  hhdr = HDR(h);
1,105,323✔
1080
#ifdef MARK_BIT_PER_OBJ
1081
  sz = hhdr->hb_sz;
1082
#endif
1083
#ifdef GC_ASSERTIONS
1084
  q2 = (ptr_t)obj_link(q);
1,105,323✔
1085
#endif
1086
  for (;;) {
29,370,204✔
1087
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
30,475,527✔
1088

1089
    if (!mark_bit_from_hdr(hhdr, bit_no)) {
30,475,527✔
1090
      set_mark_bit_from_hdr(hhdr, bit_no);
15,680,272✔
1091
      INCR_MARKS(hhdr);
15,680,272✔
1092
    }
1093
    q = (ptr_t)obj_link(q);
30,475,527✔
1094
    if (NULL == q)
30,475,527✔
1095
      break;
1,105,323✔
1096
#ifdef GC_ASSERTIONS
1097
    /*
1098
     * Detect a cycle in the free list.  The algorithm uses a second,
1099
     * "twice faster" iterator over the list; the two iterators meet
1100
     * if the list contains a cycle.
1101
     */
1102
    if (q2 != NULL) {
29,370,204✔
1103
      q2 = (ptr_t)obj_link(q2);
14,944,286✔
1104
      GC_ASSERT(q2 != q);
14,944,286✔
1105
      if (q2 != NULL) {
14,944,286✔
1106
        q2 = (ptr_t)obj_link(q2);
14,425,918✔
1107
        GC_ASSERT(q2 != q);
14,425,918✔
1108
      }
1109
    }
1110
#endif
1111

1112
    h = HBLKPTR(q);
29,370,204✔
1113
    if (UNLIKELY(h != last_h)) {
29,370,204✔
1114
      last_h = h;
148,312✔
1115
      /* Update `hhdr` and `sz`. */
1116
      hhdr = HDR(h);
148,312✔
1117
#ifdef MARK_BIT_PER_OBJ
1118
      sz = hhdr->hb_sz;
1119
#endif
1120
    }
1121
  }
1122
}
1,105,323✔
1123

1124
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1125
/*
1126
 * Check that all mark bits for the free list, whose first entry is
1127
 * `*pfreelist`, are set.  The check is skipped if `*pfreelist` points to
1128
 * a special value.
1129
 */
1130
void
1131
GC_check_fl_marks(void **pfreelist)
15,686,112✔
1132
{
1133
  /*
1134
   * TODO: There is a data race with `GC_FAST_MALLOC_GRANS` (which does
1135
   * not do atomic updates to the free-list).  The race seems to be
1136
   * harmless, and for now we just skip this check in case of TSan.
1137
   */
1138
#  if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
1139
  ptr_t list = GC_cptr_load_acquire_read((volatile ptr_t *)pfreelist);
15,686,112✔
1140
  /* Atomic operations are used because the world is running. */
1141
  ptr_t p, prev, next;
1142

1143
  if (ADDR(list) <= HBLKSIZE)
15,686,112✔
1144
    return;
14,581,339✔
1145

1146
  prev = (ptr_t)pfreelist;
1,104,773✔
1147
  for (p = list; p != NULL; p = next) {
31,558,390✔
1148
    if (!GC_is_marked(p)) {
30,453,617✔
1149
      ABORT_ARG2("Unmarked local free-list entry", ": object %p on list %p",
×
1150
                 (void *)p, (void *)list);
1151
    }
1152

1153
    /*
1154
     * While traversing the free-list, it re-reads the pointer to the
1155
     * current node before accepting its next pointer and bails out
1156
     * if the latter has changed.  That way, it will not try to follow
1157
     * the pointer which might have been modified after the object was
1158
     * returned to the client.  It might perform the mark-check on the
1159
     * just allocated object but that should be harmless.
1160
     */
1161
    next = GC_cptr_load_acquire_read((volatile ptr_t *)p);
30,453,617✔
1162
    if (GC_cptr_load((volatile ptr_t *)prev) != p)
30,453,617✔
1163
      break;
×
1164
    prev = p;
30,453,617✔
1165
  }
1166
#  else
1167
  /* FIXME: Not implemented (just skipped). */
1168
  (void)pfreelist;
1169
#  endif
1170
}
1171
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1172

1173
/*
1174
 * Clear all mark bits for the free list (specified by the first entry).
1175
 * Decrement `GC_bytes_found` by number of bytes on free list.
1176
 */
1177
STATIC void
1178
GC_clear_fl_marks(ptr_t q)
55,173✔
1179
{
1180
  struct hblk *h = HBLKPTR(q);
55,173✔
1181
  const struct hblk *last_h = h;
55,173✔
1182
  hdr *hhdr = HDR(h);
55,173✔
1183
  size_t sz = hhdr->hb_sz; /*< normally set only once */
55,173✔
1184

1185
  for (;;) {
5,406,212✔
1186
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
5,461,385✔
1187

1188
    if (mark_bit_from_hdr(hhdr, bit_no)) {
5,461,385✔
1189
      size_t n_marks = hhdr->hb_n_marks;
1,006,519✔
1190

1191
#ifdef LINT2
1192
      if (0 == n_marks)
1193
        ABORT("hhdr->hb_n_marks cannot be zero");
1194
#else
1195
      GC_ASSERT(n_marks != 0);
1,006,519✔
1196
#endif
1197
      clear_mark_bit_from_hdr(hhdr, bit_no);
1,006,519✔
1198
      n_marks--;
1,006,519✔
1199
#ifdef PARALLEL_MARK
1200
      /* Approximate count, do not decrement to zero! */
1201
      if (n_marks != 0 || !GC_parallel) {
1,006,519✔
1202
        hhdr->hb_n_marks = n_marks;
1,005,899✔
1203
      }
1204
#else
1205
      hhdr->hb_n_marks = n_marks;
1206
#endif
1207
    }
1208
    GC_bytes_found -= (GC_signed_word)sz;
5,461,385✔
1209

1210
    q = (ptr_t)obj_link(q);
5,461,385✔
1211
    if (NULL == q)
5,461,385✔
1212
      break;
55,173✔
1213

1214
    h = HBLKPTR(q);
5,406,212✔
1215
    if (UNLIKELY(h != last_h)) {
5,406,212✔
1216
      last_h = h;
315,679✔
1217
      /* Update `hhdr` and `sz`. */
1218
      hhdr = HDR(h);
315,679✔
1219
      sz = hhdr->hb_sz;
315,679✔
1220
    }
1221
  }
1222
}
55,173✔
1223

1224
/* Mark all objects on the free lists for every object kind. */
1225
static void
1226
set_all_fl_marks(void)
66✔
1227
{
1228
  unsigned kind;
1229

1230
  for (kind = 0; kind < GC_n_kinds; kind++) {
330✔
1231
    word size; /*< current object size */
1232

1233
    for (size = 1; size <= MAXOBJGRANULES; size++) {
34,056✔
1234
      ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
33,792✔
1235

1236
      if (q != NULL)
33,792✔
1237
        GC_set_fl_marks(q);
541✔
1238
    }
1239
  }
1240
}
66✔
1241

1242
/*
1243
 * Clear free-list mark bits.  Also subtract memory remaining from
1244
 * `GC_bytes_found` count.
1245
 */
1246
static void
1247
clear_all_fl_marks(void)
25,823✔
1248
{
1249
  unsigned kind;
1250

1251
  for (kind = 0; kind < GC_n_kinds; kind++) {
150,434✔
1252
    word size; /*< current object size */
1253

1254
    for (size = 1; size <= MAXOBJGRANULES; size++) {
16,074,819✔
1255
      ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
15,950,208✔
1256

1257
      if (q != NULL)
15,950,208✔
1258
        GC_clear_fl_marks(q);
55,173✔
1259
    }
1260
  }
1261
}
25,823✔
1262

1263
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1264
void GC_check_tls(void);
1265
#endif
1266

1267
GC_on_heap_resize_proc GC_on_heap_resize = 0;
1268

1269
/* Used for logging only. */
1270
GC_INLINE int
1271
GC_compute_heap_usage_percent(void)
×
1272
{
1273
  word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
×
1274
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
×
1275
#if defined(CPPCHECK)
1276
  word limit = (GC_WORD_MAX >> 1) / 50; /*< to avoid a false positive */
1277
#else
1278
  const word limit = GC_WORD_MAX / 100;
×
1279
#endif
1280

1281
  return used >= heap_sz ? 0
1282
         : used < limit  ? (int)((used * 100) / heap_sz)
×
1283
                         : (int)(used / (heap_sz / 100));
×
1284
}
1285

1286
#define GC_DBGLOG_PRINT_HEAP_IN_USE()                                        \
1287
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1288
                   GC_compute_heap_usage_percent(),                          \
1289
                   TO_KiB_UL(GC_composite_in_use),                           \
1290
                   TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1291

1292
/*
1293
 * Finish up a collection.  Assumes mark bits are consistent, but the
1294
 * world is otherwise running.
1295
 */
1296
STATIC void
1297
GC_finish_collection(void)
25,823✔
1298
{
1299
#ifndef NO_CLOCK
1300
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
25,823✔
1301
  CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
25,823✔
1302
#endif
1303

1304
  GC_ASSERT(I_HOLD_LOCK());
25,823✔
1305
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC) \
1306
    && !defined(DBG_HDRS_ALL)
1307
  /* Check that we marked some of our own data. */
1308
  GC_check_tls();
25,823✔
1309
  /* TODO: Add more checks. */
1310
#endif
1311

1312
#ifndef NO_CLOCK
1313
  if (GC_print_stats)
25,823✔
1314
    GET_TIME(start_time);
×
1315
#endif
1316
  if (GC_on_collection_event)
25,823✔
1317
    GC_on_collection_event(GC_EVENT_RECLAIM_START);
×
1318

1319
#ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1320
  if (GC_bytes_found > 0)
25,823✔
1321
    GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
25,395✔
1322
#endif
1323
  GC_bytes_found = 0;
25,823✔
1324
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1325
  if (GETENV("GC_PRINT_ADDRESS_MAP") != NULL) {
25,823✔
1326
    GC_print_address_map();
×
1327
  }
1328
#endif
1329
  COND_DUMP;
25,823✔
1330
  if (GC_find_leak_inner) {
25,823✔
1331
    set_all_fl_marks();
66✔
1332
    /* This just checks; it does not really reclaim anything. */
1333
    GC_start_reclaim(TRUE);
66✔
1334
  }
1335

1336
#ifndef GC_NO_FINALIZATION
1337
  GC_finalize();
25,823✔
1338
#endif
1339
#ifndef NO_CLOCK
1340
  if (GC_print_stats)
25,823✔
1341
    GET_TIME(finalize_time);
×
1342
#endif
1343
#ifdef MAKE_BACK_GRAPH
1344
  if (GC_print_back_height) {
1345
    GC_traverse_back_graph();
1346
  }
1347
#endif
1348

1349
  /*
1350
   * Clear free-list mark bits, in case they got accidentally marked
1351
   * (or `GC_find_leak` is set and they were intentionally marked).
1352
   * Note that composite objects on free list are cleared, thus
1353
   * accidentally marking a free list is not a problem; but some objects
1354
   * on the list itself might be marked, and the given function call
1355
   * fixes it.
1356
   */
1357
  clear_all_fl_marks();
25,823✔
1358

1359
  GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
25,823✔
1360
                        (long)GC_bytes_found);
×
1361

1362
  /* Reconstruct free lists to contain everything not marked. */
1363
  GC_start_reclaim(FALSE);
25,823✔
1364

1365
#ifdef USE_MUNMAP
1366
  if (GC_unmap_threshold > 0    /*< memory unmapping enabled? */
25,823✔
1367
      && LIKELY(GC_gc_no != 1)) /*< do not unmap during `GC_init` */
25,823✔
1368
    GC_unmap_old(GC_unmap_threshold);
25,789✔
1369

1370
  GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
25,823✔
1371
#endif
1372
  GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
25,823✔
1373
  GC_DBGLOG_PRINTF(
25,823✔
1374
      "GC #%lu freed %ld bytes, heap %lu KiB (" IF_USE_MUNMAP(
1375
          "+ %lu KiB unmapped ") "+ %lu KiB internal)\n",
1376
      (unsigned long)GC_gc_no, (long)GC_bytes_found,
×
1377
      TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
×
1378
      COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
×
1379
      TO_KiB_UL(GC_our_mem_bytes - GC_heapsize + sizeof(GC_arrays)));
×
1380
  GC_DBGLOG_PRINT_HEAP_IN_USE();
25,823✔
1381
  if (GC_is_full_gc) {
25,823✔
1382
    GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
11,996✔
1383
    GC_need_full_gc = FALSE;
11,996✔
1384
  } else {
1385
    GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
27,654✔
1386
                      > min_bytes_allocd() + GC_large_free_bytes;
13,827✔
1387
  }
1388

1389
  /* Reset or increment counters for next cycle. */
1390
  GC_n_attempts = 0;
25,823✔
1391
  GC_is_full_gc = FALSE;
25,823✔
1392
  GC_bytes_allocd_before_gc += GC_bytes_allocd;
25,823✔
1393
  GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
25,823✔
1394
  GC_bytes_allocd = 0;
25,823✔
1395
  GC_bytes_dropped = 0;
25,823✔
1396
  GC_bytes_freed = 0;
25,823✔
1397
  GC_finalizer_bytes_freed = 0;
25,823✔
1398

1399
  if (GC_on_collection_event)
25,823✔
1400
    GC_on_collection_event(GC_EVENT_RECLAIM_END);
×
1401
#ifndef NO_CLOCK
1402
  if (GC_print_stats) {
25,823✔
1403
    CLOCK_TYPE done_time;
1404

1405
    GET_TIME(done_time);
×
1406
#  if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1407
    /* A convenient place to output finalization statistics. */
1408
    GC_print_finalization_stats();
×
1409
#  endif
1410
    GC_log_printf(
×
1411
        "Finalize and initiate sweep took %lu ms %lu ns + %lu ms %lu ns\n",
1412
        MS_TIME_DIFF(finalize_time, start_time),
×
1413
        NS_FRAC_TIME_DIFF(finalize_time, start_time),
×
1414
        MS_TIME_DIFF(done_time, finalize_time),
×
1415
        NS_FRAC_TIME_DIFF(done_time, finalize_time));
×
1416
  }
1417
#elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1418
  if (GC_print_stats)
1419
    GC_print_finalization_stats();
1420
#endif
1421
}
25,823✔
1422

1423
/* Note: if `stop_func` is 0, then `GC_default_stop_func` is used instead. */
1424
STATIC GC_bool
1425
GC_try_to_collect_general(GC_stop_func stop_func, GC_bool force_unmap)
2,458✔
1426
{
1427
  GC_bool result;
1428
#ifdef USE_MUNMAP
1429
  unsigned old_unmap_threshold;
1430
#endif
1431
  IF_CANCEL(int cancel_state;)
1432

1433
  if (UNLIKELY(!GC_is_initialized))
2,458✔
1434
    GC_init();
×
1435
  if (GC_debugging_started)
2,458✔
1436
    GC_print_all_smashed();
148✔
1437
  GC_notify_or_invoke_finalizers();
2,458✔
1438
  LOCK();
2,458✔
1439
  if (force_unmap) {
2,458✔
1440
    /*
1441
     * Record current heap size to make heap growth more conservative
1442
     * afterwards (as if the heap is growing from zero size again).
1443
     */
1444
    GC_heapsize_at_forced_unmap = GC_heapsize;
70✔
1445
  }
1446
  DISABLE_CANCEL(cancel_state);
2,458✔
1447
#ifdef USE_MUNMAP
1448
  old_unmap_threshold = GC_unmap_threshold;
2,458✔
1449
  if (force_unmap || (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
2,458✔
1450
    GC_unmap_threshold = 1; /*< unmap as much as possible */
70✔
1451
#endif
1452
  /* Minimize junk left in my registers. */
1453
  GC_noop6(0, 0, 0, 0, 0, 0);
2,458✔
1454
  result = GC_try_to_collect_inner(stop_func != 0 ? stop_func
2,458✔
1455
                                                  : GC_default_stop_func);
1456
#ifdef USE_MUNMAP
1457
  /* Restore it. */
1458
  GC_unmap_threshold = old_unmap_threshold;
2,458✔
1459
#endif
1460
  RESTORE_CANCEL(cancel_state);
2,458✔
1461
  UNLOCK();
2,458✔
1462
  if (result) {
2,458✔
1463
    if (GC_debugging_started)
2,441✔
1464
      GC_print_all_smashed();
148✔
1465
    GC_notify_or_invoke_finalizers();
2,441✔
1466
  }
1467
  return result;
2,458✔
1468
}
1469

1470
/* Externally callable routines to invoke full, stop-the-world collection. */
1471

1472
GC_API int GC_CALL
1473
GC_try_to_collect(GC_stop_func stop_func)
×
1474
{
1475
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
×
1476
  return (int)GC_try_to_collect_general(stop_func, FALSE);
×
1477
}
1478

1479
GC_API void GC_CALL
1480
GC_gcollect(void)
2,388✔
1481
{
1482
  /*
1483
   * Zero is passed as stop_func to get `GC_default_stop_func` value
1484
   * while holding the allocator lock (to prevent data race).
1485
   */
1486
  (void)GC_try_to_collect_general(0, FALSE);
2,388✔
1487
  if (get_have_errors())
2,388✔
1488
    GC_print_all_errors();
56✔
1489
}
2,388✔
1490

1491
GC_API void GC_CALL
1492
GC_gcollect_and_unmap(void)
70✔
1493
{
1494
  /* Collect and force memory unmapping to OS. */
1495
  (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
70✔
1496
}
70✔
1497

1498
GC_INNER ptr_t
1499
GC_os_get_mem(size_t bytes)
1,928✔
1500
{
1501
  ptr_t space;
1502

1503
  GC_ASSERT(I_HOLD_LOCK());
1,928✔
1504
  space = (ptr_t)GET_MEM(bytes); /*< `HBLKSIZE`-aligned */
1,928✔
1505
  if (UNLIKELY(NULL == space))
1,928✔
1506
    return NULL;
×
1507
#ifdef USE_PROC_FOR_LIBRARIES
1508
  /* Add `HBLKSIZE`-aligned `GET_MEM`-generated block to `GC_our_memory`. */
1509
  if (GC_n_memory >= MAX_HEAP_SECTS)
1510
    ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1511
  GC_our_memory[GC_n_memory].hs_start = space;
1512
  GC_our_memory[GC_n_memory].hs_bytes = bytes;
1513
  GC_n_memory++;
1514
#endif
1515
  GC_our_mem_bytes += bytes;
1,928✔
1516
  GC_VERBOSE_LOG_PRINTF("Got %lu bytes from OS\n", (unsigned long)bytes);
1,928✔
1517
  return space;
1,928✔
1518
}
1519

1520
/*
1521
 * Use the chunk of memory starting at `h` of size `sz` as part of the heap.
1522
 * Assumes `h` is `HBLKSIZE`-aligned, `sz` is a multiple of `HBLKSIZE`.
1523
 */
1524
STATIC void
1525
GC_add_to_heap(struct hblk *h, size_t sz)
669✔
1526
{
1527
  hdr *hhdr;
1528
  ptr_t endp;
1529
  size_t old_capacity = 0;
669✔
1530
  void *old_heap_sects = NULL;
669✔
1531
#ifdef GC_ASSERTIONS
1532
  size_t i;
1533
#endif
1534

1535
  GC_ASSERT(I_HOLD_LOCK());
669✔
1536
  GC_ASSERT(ADDR(h) % HBLKSIZE == 0);
669✔
1537
  GC_ASSERT(sz % HBLKSIZE == 0);
669✔
1538
  GC_ASSERT(sz > 0);
669✔
1539
  GC_ASSERT(GC_all_nils != NULL);
669✔
1540

1541
  if (UNLIKELY(GC_n_heap_sects == GC_capacity_heap_sects)) {
669✔
1542
    /* Allocate new `GC_heap_sects` with sufficient capacity. */
1543
#ifndef INITIAL_HEAP_SECTS
1544
#  define INITIAL_HEAP_SECTS 32
1545
#endif
1546
    size_t new_capacity
40✔
1547
        = GC_n_heap_sects > 0 ? GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
40✔
1548
    void *new_heap_sects
1549
        = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
40✔
1550

1551
    if (NULL == new_heap_sects) {
40✔
1552
      /* Retry with smaller yet sufficient capacity. */
1553
      new_capacity = GC_n_heap_sects + INITIAL_HEAP_SECTS;
×
1554
      new_heap_sects
1555
          = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
×
1556
      if (NULL == new_heap_sects)
×
1557
        ABORT("Insufficient memory for heap sections");
×
1558
    }
1559
    old_capacity = GC_capacity_heap_sects;
40✔
1560
    old_heap_sects = GC_heap_sects;
40✔
1561
    /* Transfer `GC_heap_sects` contents to the newly allocated array. */
1562
    if (GC_n_heap_sects > 0)
40✔
1563
      BCOPY(old_heap_sects, new_heap_sects,
6✔
1564
            GC_n_heap_sects * sizeof(struct HeapSect));
1565
    GC_capacity_heap_sects = new_capacity;
40✔
1566
    GC_heap_sects = (struct HeapSect *)new_heap_sects;
40✔
1567
    GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
40✔
1568
                       (unsigned long)new_capacity);
1569
  }
1570

1571
  while (UNLIKELY(ADDR(h) <= HBLKSIZE)) {
669✔
1572
    /* Cannot handle memory near address zero. */
1573
    ++h;
×
1574
    sz -= HBLKSIZE;
×
1575
    if (0 == sz)
×
1576
      return;
×
1577
  }
1578
  while (UNLIKELY(ADDR(h) >= GC_WORD_MAX - sz)) {
669✔
1579
    /* Prevent overflow when calculating `endp`. */
1580
    sz -= HBLKSIZE;
×
1581
    if (0 == sz)
×
1582
      return;
×
1583
  }
1584
  endp = (ptr_t)h + sz;
669✔
1585

1586
  hhdr = GC_install_header(h);
669✔
1587
  if (UNLIKELY(NULL == hhdr)) {
669✔
1588
    /*
1589
     * This is extremely unlikely. Cannot add it.  This will almost
1590
     * certainly result in a `NULL` returned from the allocator, which
1591
     * is entirely appropriate.
1592
     */
1593
    return;
×
1594
  }
1595
#ifdef GC_ASSERTIONS
1596
  /* Ensure no intersection between sections. */
1597
  for (i = 0; i < GC_n_heap_sects; i++) {
17,012✔
1598
    ptr_t hs_start = GC_heap_sects[i].hs_start;
16,343✔
1599
    ptr_t hs_end = hs_start + GC_heap_sects[i].hs_bytes;
16,343✔
1600

1601
    GC_ASSERT(!(ADDR_INSIDE((ptr_t)h, hs_start, hs_end)
16,343✔
1602
                || (ADDR_LT(hs_start, endp) && ADDR_GE(hs_end, endp))
1603
                || (ADDR_LT((ptr_t)h, hs_start) && ADDR_LT(hs_end, endp))));
1604
  }
1605
#endif
1606
  GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)h;
669✔
1607
  GC_heap_sects[GC_n_heap_sects].hs_bytes = sz;
669✔
1608
  GC_n_heap_sects++;
669✔
1609
  hhdr->hb_block = h;
669✔
1610
  hhdr->hb_sz = sz;
669✔
1611
  hhdr->hb_flags = 0;
669✔
1612
  GC_freehblk(h);
669✔
1613
  GC_heapsize += sz;
669✔
1614

1615
  if (ADDR_GE((ptr_t)GC_least_plausible_heap_addr, (ptr_t)h)
669✔
1616
      || UNLIKELY(NULL == GC_least_plausible_heap_addr)) {
622✔
1617
    /*
1618
     * Making it a little smaller than necessary prevents us from
1619
     * getting a false hit from the variable itself.  There is some
1620
     * unintentional reflection here.
1621
     */
1622
    GC_least_plausible_heap_addr = (ptr_t)h - sizeof(ptr_t);
47✔
1623
  }
1624
  if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, endp)) {
669✔
1625
    GC_greatest_plausible_heap_addr = endp;
×
1626
  }
1627
#ifdef SET_REAL_HEAP_BOUNDS
1628
  if (ADDR(h) < GC_least_real_heap_addr
669✔
1629
      || UNLIKELY(0 == GC_least_real_heap_addr))
156✔
1630
    GC_least_real_heap_addr = ADDR(h) - sizeof(ptr_t);
547✔
1631
  if (GC_greatest_real_heap_addr < ADDR(endp)) {
669✔
1632
#  ifdef INCLUDE_LINUX_THREAD_DESCR
1633
    /* Avoid heap intersection with the static data roots. */
1634
    GC_exclude_static_roots_inner((ptr_t)h, endp);
1635
#  endif
1636
    GC_greatest_real_heap_addr = ADDR(endp);
59✔
1637
  }
1638
#endif
1639
  GC_handle_protected_regions_limit();
669✔
1640
  if (UNLIKELY(old_capacity > 0)) {
669✔
1641
#ifndef GWW_VDB
1642
    /*
1643
     * Recycling may call `GC_add_to_heap()` again but should not cause
1644
     * resizing of `GC_heap_sects`.
1645
     */
1646
    GC_scratch_recycle_no_gww(old_heap_sects,
6✔
1647
                              old_capacity * sizeof(struct HeapSect));
1648
#else
1649
    /* TODO: Implement GWW-aware recycling as in `alloc_mark_stack`. */
1650
    GC_noop1_ptr(old_heap_sects);
1651
#endif
1652
  }
1653
}

#ifndef NO_DEBUGGING
void
GC_print_heap_sects(void)
{
  size_t i;

  GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
            (unsigned long)GC_heapsize /*, */
                COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

  for (i = 0; i < GC_n_heap_sects; i++) {
    ptr_t start = GC_heap_sects[i].hs_start;
    size_t len = GC_heap_sects[i].hs_bytes;
    unsigned nbl = 0;
#  ifndef NO_BLACK_LISTING
    struct hblk *h;

    for (h = (struct hblk *)start; ADDR_LT((ptr_t)h, start + len); h++) {
      if (GC_is_black_listed(h, HBLKSIZE))
        nbl++;
    }
#  endif
    GC_printf("Section %u from %p to %p %u/%lu blacklisted\n", (unsigned)i,
              (void *)start, (void *)&start[len], nbl,
              (unsigned long)divHBLKSZ(len));
  }
}
#endif /* !NO_DEBUGGING */

void *GC_least_plausible_heap_addr = MAKE_CPTR(GC_WORD_MAX);
void *GC_greatest_plausible_heap_addr = NULL;

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL
GC_set_max_heap_size(GC_word n)
{
  GC_max_heapsize = n;
}
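
/*
 * `GC_set_max_heap_size()` is part of the public API (declared in `gc.h`).
 * A minimal usage sketch from client code (the 64 MiB figure is only an
 * illustrative value, and the failure handler is hypothetical):
 *
 * ```
 *   GC_INIT();
 *   GC_set_max_heap_size(64 * 1024 * 1024);
 *   ...
 *   void *p = GC_MALLOC(1000);
 *   if (NULL == p)
 *     handle_allocation_failure();  // client-supplied handler
 * ```
 *
 * Once further growth would exceed the limit, heap expansion fails and
 * the allocator eventually returns `NULL` (see `GC_collect_or_expand()`
 * below).
 */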

word GC_max_retries = 0;

GC_INNER void
GC_scratch_recycle_inner(void *ptr, size_t sz)
{
  size_t page_offset;
  size_t displ = 0;
  size_t recycled_bytes;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == ptr)
    return;

  GC_ASSERT(sz != 0);
  GC_ASSERT(GC_page_size != 0);
  /* TODO: Assert correct memory flags if `GWW_VDB`. */
  page_offset = ADDR(ptr) & (GC_page_size - 1);
  if (page_offset != 0)
    displ = GC_page_size - page_offset;
  recycled_bytes = sz > displ ? (sz - displ) & ~(GC_page_size - 1) : 0;
  GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
                     (unsigned long)recycled_bytes, (unsigned long)sz, ptr);
  if (recycled_bytes > 0)
    GC_add_to_heap((struct hblk *)((ptr_t)ptr + displ), recycled_bytes);
}
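
/*
 * A worked example of the alignment arithmetic above (the numbers are
 * only illustrative; assume a 4 KiB `GC_page_size`): for `ptr` at page
 * offset 0x100 and `sz` of 10000 bytes, `page_offset` is 256, so `displ`
 * is 4096 - 256 = 3840, and `recycled_bytes` is (10000 - 3840) rounded
 * down to a page multiple, i.e. 4096.  Thus exactly one whole page
 * starting at `ptr + 3840` is handed to `GC_add_to_heap()`; the unaligned
 * head and tail of the region are simply left unused.
 */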

GC_INNER GC_bool
GC_expand_hp_inner(word n)
{
  size_t sz;
  struct hblk *space;
  /* Number of bytes by which we expect the heap to expand soon. */
  word expansion_slop;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_page_size != 0);
  if (0 == n)
    n = 1;
  sz = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
  GC_DBGLOG_PRINT_HEAP_IN_USE();
  if (GC_max_heapsize != 0
      && (GC_max_heapsize < (word)sz
          || GC_heapsize > GC_max_heapsize - (word)sz)) {
    /* Exceeded the self-imposed limit. */
    return FALSE;
  }
  space = (struct hblk *)GC_os_get_mem(sz);
  if (UNLIKELY(NULL == space)) {
    WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", sz >> 10);
    return FALSE;
  }
  GC_last_heap_growth_gc_no = GC_gc_no;
  GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                    TO_KiB_UL(GC_heapsize + sz),
                    (unsigned long)GC_bytes_allocd);

  /*
   * Adjust heap limits generously for black-listing to work better.
   * `GC_add_to_heap()` performs minimal adjustment needed for correctness.
   */
  expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
  if ((0 == GC_last_heap_addr && (ADDR(space) & SIGNB) == 0)
      || (GC_last_heap_addr != 0 && GC_last_heap_addr < ADDR(space))) {
    /* Assume the heap is growing up. */
    if (LIKELY(ADDR(space) < GC_WORD_MAX - (sz + expansion_slop))) {
      ptr_t new_limit = (ptr_t)space + sz + expansion_slop;

      if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, new_limit))
        GC_greatest_plausible_heap_addr = new_limit;
    }
  } else {
    /* Heap is growing down. */
    if (LIKELY(ADDR(space) > expansion_slop + sizeof(ptr_t))) {
      ptr_t new_limit = (ptr_t)space - expansion_slop - sizeof(ptr_t);

      if (ADDR_LT(new_limit, (ptr_t)GC_least_plausible_heap_addr))
        GC_least_plausible_heap_addr = new_limit;
    }
  }
  GC_last_heap_addr = ADDR(space);

  GC_add_to_heap(space, sz);
  if (GC_on_heap_resize)
    (*GC_on_heap_resize)(GC_heapsize);

  return TRUE;
}

GC_API int GC_CALL
GC_expand_hp(size_t bytes)
{
  size_t n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
  word old_heapsize;
#endif
  GC_bool result;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  LOCK();
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
  old_heapsize = GC_heapsize;
#endif
  result = GC_expand_hp_inner(n_blocks);
  if (result) {
    GC_requested_heapsize += bytes;
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
    if (GC_dont_gc) {
      /* Do not call `WARN()` if the heap growth is intentional. */
      GC_ASSERT(GC_heapsize >= old_heapsize);
      GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
    }
#endif
  }
  UNLOCK();
  /*
   * Really returns a `GC_bool` value, but the function is externally
   * visible, so that is clumsy.
   */
  return (int)result;
}
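
/*
 * `GC_expand_hp()` is the public entry point (declared in `gc.h`) for
 * growing the heap explicitly, e.g. to avoid repeated small expansions
 * during startup.  A minimal sketch (the 8 MiB figure is only an example):
 *
 * ```
 *   GC_INIT();
 *   if (!GC_expand_hp(8 * 1024 * 1024)) {
 *     // Could not pre-grow the heap; allocation will expand it lazily.
 *   }
 * ```
 */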

/*
 * The minimum value of the ratio of bytes allocated since the latest
 * collection to the number of finalizers created since that collection
 * which triggers a collection instead of a heap expansion.  Has no effect
 * in the incremental mode.
 */
#if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
#else
STATIC word GC_allocd_bytes_per_finalizer = 10000;
#endif

GC_API void GC_CALL
GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}

GC_API GC_word GC_CALL
GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}
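
/*
 * Both accessors are public (declared in `gc.h`).  Per the check in
 * `GC_collect_or_expand()` below, raising the value makes newly created
 * finalizers more likely to trigger a collection instead of a heap
 * expansion; lowering it weakens that effect.  A small usage sketch
 * (the value 5000 is only an example):
 *
 * ```
 *   GC_set_allocd_bytes_per_finalizer(5000);
 *   assert(GC_get_allocd_bytes_per_finalizer() == 5000);
 * ```
 */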

GC_INNER GC_bool
GC_collect_or_expand(word needed_blocks, unsigned flags, GC_bool retry)
{
  static word last_fo_entries, last_bytes_finalized;

  GC_bool gc_not_stopped = TRUE;
  word blocks_to_get;
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  DISABLE_CANCEL(cancel_state);
  if (!GC_incremental && !GC_dont_gc
      && ((GC_dont_expand && GC_bytes_allocd > 0)
          || (GC_fo_entries > last_fo_entries
              && (last_bytes_finalized | GC_bytes_finalized) != 0
              && (GC_fo_entries - last_fo_entries)
                         * GC_allocd_bytes_per_finalizer
                     > GC_bytes_allocd)
          || GC_should_collect())) {
    /*
     * Try to do a full collection using "default" `stop_func` (unless
     * nothing has been allocated since the latest collection or heap
     * expansion is disabled).
     */
    gc_not_stopped = GC_try_to_collect_inner(
        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry)
            ? GC_default_stop_func
            : GC_never_stop_func);
    if (gc_not_stopped || !retry) {
      /*
       * Either the collection has not been aborted or this is the
       * first attempt (in a loop).
       */
      last_fo_entries = GC_fo_entries;
      last_bytes_finalized = GC_bytes_finalized;
      RESTORE_CANCEL(cancel_state);
      return TRUE;
    }
  }

  blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                      / (HBLKSIZE * GC_free_space_divisor)
                  + needed_blocks;
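  /*
   * Example with illustrative numbers: given the default
   * `GC_free_space_divisor` of 3, a 4 KiB `HBLKSIZE` and 48 MiB of heap
   * growth since the latest forced unmap, the first term is
   * 48 MiB / (4 KiB * 3) = 4096 blocks; `needed_blocks` is added on top,
   * and the result is then clamped by the `MAXHINCR` / `MINHINCR` logic
   * and the black-listing slop below.
   */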
  if (blocks_to_get > MAXHINCR) {
#ifdef NO_BLACK_LISTING
    UNUSED_ARG(flags);
    blocks_to_get = needed_blocks > MAXHINCR ? needed_blocks : MAXHINCR;
#else
    word slop;

    /*
     * Get the minimum required to make it likely that we can satisfy
     * the current request in the presence of black-listing.  This will
     * probably be bigger than `MAXHINCR`.
     */
    if ((flags & IGNORE_OFF_PAGE) != 0) {
      slop = 4;
    } else {
      slop = 2 * divHBLKSZ(BL_LIMIT);
      if (slop > needed_blocks)
        slop = needed_blocks;
    }
    if (needed_blocks + slop > MAXHINCR) {
      blocks_to_get = needed_blocks + slop;
    } else {
      blocks_to_get = MAXHINCR;
    }
#endif
    if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
      blocks_to_get = divHBLKSZ(GC_WORD_MAX);
  } else if (blocks_to_get < MINHINCR) {
    blocks_to_get = MINHINCR;
  }

  if (GC_max_heapsize > GC_heapsize) {
    word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
    if (blocks_to_get > max_get_blocks)
      blocks_to_get
          = max_get_blocks > needed_blocks ? max_get_blocks : needed_blocks;
  }

#ifdef USE_MUNMAP
  if (GC_unmap_threshold > 1) {
    /*
     * Return as much memory to the OS as possible before trying to
     * get memory from it.
     */
    GC_unmap_old(0);
  }
#endif
  if (!GC_expand_hp_inner(blocks_to_get)
      && (blocks_to_get == needed_blocks
          || !GC_expand_hp_inner(needed_blocks))) {
    if (!gc_not_stopped) {
      /* Do not increment `GC_alloc_fail_count` here (and no warning). */
      GC_gcollect_inner();
      GC_ASSERT(0 == GC_bytes_allocd);
    } else if (GC_alloc_fail_count++ < GC_max_retries) {
      WARN("Out of Memory!  Trying to continue...\n", 0);
      GC_gcollect_inner();
    } else {
#ifdef USE_MUNMAP
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#endif
#if !defined(SMALL_CONFIG) && (CPP_WORDSZ >= 32)
#  define MAX_HEAPSIZE_WARNED_IN_BYTES (5 << 20) /*< 5 MB */

      if (GC_heapsize > (word)MAX_HEAPSIZE_WARNED_IN_BYTES) {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
             " Returning NULL!\n",
             (GC_heapsize - GC_unmapped_bytes) >> 20);
      } else
#endif
      /* else */ {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " bytes."
             " Returning NULL!\n",
             GC_heapsize - GC_unmapped_bytes);
      }
      RESTORE_CANCEL(cancel_state);
      return FALSE;
    }
  } else if (GC_alloc_fail_count > 0) {
    GC_COND_LOG_PRINTF("Memory available again...\n");
  }
  RESTORE_CANCEL(cancel_state);
  return TRUE;
}

GC_INNER ptr_t
GC_allocobj(size_t lg, int kind)
{
#define MAX_ALLOCOBJ_RETRIES 3
  int retry_cnt = 0;
  void **flh = &GC_obj_kinds[kind].ok_freelist[lg];
#ifndef GC_DISABLE_INCREMENTAL
  GC_bool tried_minor = FALSE;
#endif

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(0 == lg))
    return NULL;

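  /*
   * Refill the free list in escalating steps: optionally do a bit of
   * incremental marking, sweep existing blocks of this size
   * (`GC_continue_reclaim`), carve out a fresh heap block (`GC_new_hblk`),
   * try a minor collection in the purely generational mode, and, as the
   * last resort, collect fully or expand the heap (`GC_collect_or_expand`).
   */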
  while (NULL == *flh) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCOBJ_RETRIES)
      ABORT("Too many retries in GC_allocobj");
#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED && !GC_dont_gc) {
      /*
       * True incremental mode, not just generational.
       * Do our share of marking work.
       */
      GC_collect_a_little_inner(1);
    }
#endif
    /* Sweep blocks for objects of this size. */
    GC_ASSERT(!GC_is_full_gc || NULL == GC_obj_kinds[kind].ok_reclaim_list
              || NULL == GC_obj_kinds[kind].ok_reclaim_list[lg]);
    GC_continue_reclaim(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

    GC_new_hblk(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED && !tried_minor
        && !GC_dont_gc) {
      GC_collect_a_little_inner(1);
      tried_minor = TRUE;
      continue;
    }
#endif
    if (UNLIKELY(!GC_collect_or_expand(1, 0 /* flags */, retry_cnt > 0)))
      return NULL;
    retry_cnt++;
  }
  /* Successful allocation; reset failure count. */
  GC_alloc_fail_count = 0;
  return (ptr_t)(*flh);
}