bdwgc / bdwgc / build 2137 (push, via travis-ci)

07 May 2026 03:10PM UTC. Coverage: 80.39% (+0.1% from 80.268%)

Commit by ivmai: Adjust calling conventions for callbacks in msvc_dbg.c
(refactoring)

* extra/msvc_dbg.c (FunctionTableAccess, GetModuleBase): Change
calling convention from `CALLBACK` to `WINAPI`.

7207 of 8965 relevant lines covered (80.39%)

18162301.35 hits per line

Source file: /alloc.c (86.65% of lines covered)
/*
 * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2025 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * Separate free lists are maintained for different sized objects up
 * to `MAXOBJBYTES`.
 * The call `GC_allocobj(lg, kind)` ensures that the free list for the given
 * kind of objects of the given size in granules is a nonempty one.
 * It returns a pointer to the first entry on the free list.
 * In a single-threaded world, `GC_allocobj` may be called to allocate
 * an object of small size `lb` (and `NORMAL` kind) as follows
 * (`GC_generic_malloc_inner` is a wrapper over `GC_allocobj` which also
 * fills in `GC_size_map` if needed):
 *
 * ```
 *   lg = GC_size_map[lb];
 *   op = GC_objfreelist[lg];
 *   if (NULL == op) {
 *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
 *   } else {
 *     GC_objfreelist[lg] = obj_link(op);
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
 *   }
 * ```
 *
 * Note that this is very fast if the free list is not empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first "pointer-sized" word.
 */

/*
 * The allocator uses `GC_allochblk` to allocate large chunks of objects.
 * These chunks all start on addresses that are multiples of `HBLKSIZE`.
 * Each allocated chunk has an associated header, which can be located
 * quickly based on the address of the chunk.  This makes it possible
 * to check quickly whether an arbitrary address corresponds to an object
 * administered by the allocator.  (See `headers.c` file for details.)
 */
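
/*
 * As a hedged illustration (not part of the original source), the
 * address-to-header mapping described above is what the `HBLKPTR` and
 * `HDR` macros used throughout this file rely on:
 *
 * ```
 *   struct hblk *h = HBLKPTR(p); // chunk containing address `p`
 *   hdr *hhdr = HDR(h);          // its header; see `headers.c`
 * ```
 */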

/* Number of bytes not intended to be collected. */
word GC_non_gc_bytes = 0;

word GC_gc_no = 0;

#ifndef NO_CLOCK

GC_API void GC_CALL
GC_start_performance_measurement(void)
{
  GC_measure_performance = TRUE;
}

GC_API unsigned long GC_CALL
GC_get_full_gc_total_time(void)
{
  return GC_full_gc_total_time;
}

GC_API unsigned long GC_CALL
GC_get_stopped_mark_total_time(void)
{
  return GC_stopped_mark_total_time;
}

#  ifndef MAX_TOTAL_TIME_DIVISOR
/*
 * We should not use big values here (so that "outdated" delay time values
 * have less impact on the "average" delay time value than newer ones).
 */
#    define MAX_TOTAL_TIME_DIVISOR 1000
#  endif

GC_API unsigned long GC_CALL
GC_get_avg_stopped_mark_time_ns(void)
{
  unsigned long total_time;
  unsigned divisor;

  READER_LOCK();
  total_time = (unsigned long)GC_world_stopped_total_time;
  divisor = GC_world_stopped_total_divisor;
  READER_UNLOCK();
  if (0 == divisor) {
    GC_ASSERT(0 == total_time);
    /*
     * No world-stopped collection has occurred since the start of
     * performance measurements.
     */
    return 0;
  }

  /* Halve values to prevent overflow during the multiplication. */
  for (; total_time > ~0UL / (1000UL * 1000); total_time >>= 1) {
    divisor >>= 1;
    if (UNLIKELY(0 == divisor)) {
      /* The actual result is larger than the representable value. */
      return ~0UL;
    }
  }

  return total_time * (1000UL * 1000) / divisor;
}
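
/*
 * A worked example of the computation above (numbers are illustrative
 * only): with a `GC_world_stopped_total_time` of 7 ms accumulated over
 * a divisor of 2 pauses, the reported average is
 * 7 * 1000000 / 2 = 3500000 ns, i.e. 3.5 ms per world-stopped phase.
 */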

#endif /* !NO_CLOCK */

GC_API int GC_CALL
GC_is_incremental_mode(void)
{
  return (int)GC_incremental;
}

#ifdef THREADS
int GC_parallel = FALSE; /*< parallel collection is off by default */
#endif

#ifdef GC_FULL_FREQ
int GC_full_freq = GC_FULL_FREQ;
#else
/*
 * Every `GC_full_freq + 1` collection is a full collection, whether we
 * need it or not.
 */
int GC_full_freq = 19;
#endif

#ifdef THREAD_LOCAL_ALLOC
GC_INNER GC_bool GC_world_stopped = FALSE;
#endif

STATIC GC_bool GC_disable_automatic_collection = FALSE;

GC_API void GC_CALL
GC_set_disable_automatic_collection(int value)
{
  LOCK();
  GC_disable_automatic_collection = value != 0;
  UNLOCK();
}

GC_API int GC_CALL
GC_get_disable_automatic_collection(void)
{
  int value;

  READER_LOCK();
  value = (int)GC_disable_automatic_collection;
  READER_UNLOCK();
  return value;
}

/*
 * The version macros are now defined in `gc_version.h` file, which is
 * included by `gc.h` file, which, in turn, is included by `gc_priv.h` file.
 */
#ifndef GC_NO_VERSION_VAR
EXTERN_C_BEGIN
extern const GC_VERSION_VAL_T GC_version;
EXTERN_C_END

const GC_VERSION_VAL_T GC_version = ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16)
                                    | (GC_VERSION_MINOR << 8)
                                    | GC_VERSION_MICRO;
#endif

GC_API GC_VERSION_VAL_T GC_CALL
GC_get_version(void)
{
  return ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8)
         | GC_VERSION_MICRO;
}
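
/*
 * A hedged usage sketch (not part of this file): the packed value
 * returned by `GC_get_version()` can be decomposed by the client as
 * follows.
 *
 * ```
 *   GC_VERSION_VAL_T v = GC_get_version();
 *   unsigned major = (unsigned)(v >> 16);
 *   unsigned minor = (unsigned)((v >> 8) & 0xff);
 *   unsigned micro = (unsigned)(v & 0xff);
 * ```
 */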

GC_API int GC_CALL
GC_get_dont_add_byte_at_end(void)
{
#if MAX_EXTRA_BYTES > 0
  /* This is meaningful only if `GC_all_interior_pointers`. */
  return 0;
#else
  return 1;
#endif
}

/* Some more variables. */

#ifdef GC_DONT_EXPAND
int GC_dont_expand = TRUE;
#else
int GC_dont_expand = FALSE;
#endif

#ifdef GC_FREE_SPACE_DIVISOR
word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /*< must be positive */
#else
word GC_free_space_divisor = 3;
#endif

GC_INNER int GC_CALLBACK
GC_never_stop_func(void)
{
  return FALSE;
}

#ifdef GC_TIME_LIMIT
/*
 * We try to keep pause times from exceeding this by much.
 * In milliseconds.
 */
unsigned long GC_time_limit = GC_TIME_LIMIT;
#elif defined(PARALLEL_MARK)
/*
 * The parallel marker cannot be interrupted for now, so the time limit
 * is absent by default.
 */
unsigned long GC_time_limit = GC_TIME_UNLIMITED;
#else
unsigned long GC_time_limit = 15;
#endif

#ifndef NO_CLOCK
/*
 * The nanoseconds add-on to `GC_time_limit` value.
 * Not updated by `GC_set_time_limit()`.
 * Ignored if the value of `GC_time_limit` is `GC_TIME_UNLIMITED`.
 */
STATIC unsigned long GC_time_lim_nsec = 0;

#  define TV_NSEC_LIMIT (1000UL * 1000) /*< amount of nanoseconds in 1 ms */

GC_API void GC_CALL
GC_set_time_limit_tv(struct GC_timeval_s tv)
{
  GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
  GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
  GC_time_limit = tv.tv_ms;
  GC_time_lim_nsec = tv.tv_nsec;
}

GC_API struct GC_timeval_s GC_CALL
GC_get_time_limit_tv(void)
{
  struct GC_timeval_s tv;

  tv.tv_ms = GC_time_limit;
  tv.tv_nsec = GC_time_lim_nsec;
  return tv;
}
#endif /* !NO_CLOCK */
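
/*
 * An illustrative client-side sketch (not part of this file): request
 * a pause target of 15 ms plus half a millisecond using the API above.
 *
 * ```
 *   struct GC_timeval_s tv;
 *
 *   tv.tv_ms = 15;
 *   tv.tv_nsec = 500 * 1000UL; // must stay below TV_NSEC_LIMIT (1 ms)
 *   GC_set_time_limit_tv(tv);
 * ```
 */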

/* Note: accessed holding the allocator lock. */
STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;

GC_API void GC_CALL
GC_set_stop_func(GC_stop_func stop_func)
{
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
  LOCK();
  GC_default_stop_func = stop_func;
  UNLOCK();
}

GC_API GC_stop_func GC_CALL
GC_get_stop_func(void)
{
  GC_stop_func stop_func;

  READER_LOCK();
  stop_func = GC_default_stop_func;
  READER_UNLOCK();
  return stop_func;
}
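
/*
 * A minimal client-side sketch (hypothetical names, not part of this
 * file): the collector polls the registered function during collection
 * and abandons the attempt once it returns a nonzero value.
 *
 * ```
 *   static volatile int deadline_passed = 0; // hypothetical flag
 *
 *   static int GC_CALLBACK my_stop_func(void)
 *   {
 *     return deadline_passed;
 *   }
 *
 *   // ... then, at startup:
 *   GC_set_stop_func(my_stop_func);
 * ```
 */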

#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
#  define GC_timeout_stop_func GC_default_stop_func
#else
STATIC int GC_CALLBACK
GC_timeout_stop_func(void)
{
  CLOCK_TYPE current_time;
  static unsigned count = 0;
  unsigned long time_diff, nsec_diff;

  GC_ASSERT(I_HOLD_LOCK());
  if (GC_default_stop_func())
    return TRUE;

  if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
    return FALSE;

  GET_TIME(current_time);
  time_diff = MS_TIME_DIFF(current_time, GC_start_time);
  nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
#  if defined(CPPCHECK)
  /* Note: GCC reports a warning if `GC_noop1_ptr()` is used here. */
  GC_noop1(ADDR(&nsec_diff));
#  endif
  if (time_diff >= GC_time_limit
      && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
    GC_COND_LOG_PRINTF(
        "Abandoning stopped marking after %lu ms %lu ns (attempt %d)\n",
        time_diff, nsec_diff, GC_n_attempts);
    return TRUE;
  }

  return FALSE;
}
#endif /* !GC_DISABLE_INCREMENTAL */

#ifdef THREADS
GC_INNER word GC_total_stacksize = 0;
#endif

/* The lowest value returned by `min_bytes_allocd()`. */
static size_t min_bytes_allocd_minimum = 1;

GC_API void GC_CALL
GC_set_min_bytes_allocd(size_t value)
{
  GC_ASSERT(value > 0);
  min_bytes_allocd_minimum = value;
}

GC_API size_t GC_CALL
GC_get_min_bytes_allocd(void)
{
  return min_bytes_allocd_minimum;
}

/*
 * Return the minimum number of bytes that must be allocated between
 * collections to amortize the cost of the latter.  Should be nonzero.
 */
static word
min_bytes_allocd(void)
{
  word result;
  word stack_size;
  /*
   * Total size of roots; it includes double the stack size, since the
   * stack is expensive to scan.
   */
  word total_root_size;
  /* Estimate of memory to be scanned during normal collection. */
  word scan_size;

  GC_ASSERT(I_HOLD_LOCK());
#ifdef THREADS
  if (GC_has_running_threads()) {
    /* We are multi-threaded... */
    stack_size = GC_total_stacksize;
    /*
     * For now, we just use the value computed during the latest garbage
     * collection.
     */
#  ifdef DEBUG_THREADS
    GC_log_printf("Total stacks size: %lu\n", (unsigned long)stack_size);
#  endif
  } else
#endif
  /* else */ {
#ifdef STACK_NOT_SCANNED
    stack_size = 0;
#elif defined(STACK_GROWS_UP)
    stack_size = (word)(GC_approx_sp() - GC_stackbottom);
#else
    stack_size = (word)(GC_stackbottom - GC_approx_sp());
#endif
  }

  total_root_size = 2 * stack_size + GC_root_size;
  scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4 + total_root_size;
  result = scan_size / GC_free_space_divisor;
  if (GC_incremental) {
    result /= 2;
  }
  return result > min_bytes_allocd_minimum ? result : min_bytes_allocd_minimum;
}
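
/*
 * A worked example of the heuristic above (numbers are illustrative
 * only): with 8 MiB of composite objects in use, 4 MiB of atomic
 * objects, 1 MiB of roots (stacks already doubled in) and the default
 * `GC_free_space_divisor` of 3, scan_size = 2*8 + 4/4 + 1 = 18 MiB and
 * result = 6 MiB, halved to 3 MiB when running incrementally.
 */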

/*
 * Return the number of bytes allocated, adjusted for explicit storage
 * management, etc.  This number is used in deciding when to trigger
 * collections.
 */
STATIC word
GC_adj_bytes_allocd(void)
{
  GC_signed_word result;
  GC_signed_word expl_managed = (GC_signed_word)GC_non_gc_bytes
                                - (GC_signed_word)GC_non_gc_bytes_at_gc;

  /*
   * Do not count what was explicitly freed, or newly allocated for
   * explicit management.  Note that deallocating an explicitly managed
   * object should not alter result, assuming the client is playing by
   * the rules.
   */
  result = (GC_signed_word)GC_bytes_allocd + (GC_signed_word)GC_bytes_dropped
           - (GC_signed_word)GC_bytes_freed
           + (GC_signed_word)GC_finalizer_bytes_freed - expl_managed;
  if (result > (GC_signed_word)GC_bytes_allocd) {
    /* Probably a client bug or unfortunate scheduling. */
    result = (GC_signed_word)GC_bytes_allocd;
  }
  /*
   * We count objects enqueued for finalization as though they had been
   * reallocated this round.  Finalization is visible to the user.
   * And if we do not count this, we have stability problems for programs
   * that finalize all objects.
   */
  result += (GC_signed_word)GC_bytes_finalized;
  if (result < (GC_signed_word)(GC_bytes_allocd >> 3)) {
    /*
     * Always count at least 1/8 of the allocations.  We do not want to
     * collect too infrequently, since that would inhibit coalescing of
     * free storage blocks.  This also makes us partially robust against
     * client bugs.
     */
    result = (GC_signed_word)(GC_bytes_allocd >> 3);
  }
  return (word)result;
}
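
/*
 * Illustrative arithmetic for the adjustment above: if 1000 KiB were
 * allocated, 900 KiB explicitly freed, and 50 KiB finalized, the raw
 * adjusted count is 1000 - 900 + 50 = 150 KiB; it would be raised to
 * the 1/8 floor of 1000/8 = 125 KiB only if it fell below that, so
 * here 150 KiB stands.
 */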

/*
 * Clear up a few frames worth of garbage left at the top of the stack.
 * This is used to prevent us from accidentally treating garbage left
 * on the stack by other parts of the collector as roots.
 * This differs from the code in `misc.c` file, which actually tries
 * to keep the stack clear of long-lived, client-generated garbage.
 */
STATIC void
GC_clear_a_few_frames(void)
{
#ifndef CLEAR_STACK_NPTRS
#  define CLEAR_STACK_NPTRS 64 /*< pointers */
#endif
  volatile ptr_t frames[CLEAR_STACK_NPTRS];

  BZERO(CAST_AWAY_VOLATILE_PVOID(frames), sizeof(frames));
}

GC_API void GC_CALL
GC_start_incremental_collection(void)
{
#ifndef GC_DISABLE_INCREMENTAL
  LOCK();
  if (GC_incremental) {
    GC_should_start_incremental_collection = TRUE;
    if (!GC_dont_gc) {
      GC_collect_a_little_inner(1);
    }
  }
  UNLOCK();
#endif
}

GC_INNER GC_bool
GC_should_collect(void)
{
  static word last_min_bytes_allocd, last_gc_no;

  GC_ASSERT(I_HOLD_LOCK());
  if (last_gc_no != GC_gc_no) {
    last_min_bytes_allocd = min_bytes_allocd();
    last_gc_no = GC_gc_no;
  }
#ifndef GC_DISABLE_INCREMENTAL
  if (GC_should_start_incremental_collection) {
    GC_should_start_incremental_collection = FALSE;
    return TRUE;
  }
#endif
  if (GC_disable_automatic_collection)
    return FALSE;

  if (GC_last_heap_growth_gc_no == GC_gc_no) {
    /* Avoid expanding past limits used by black-listing. */
    return TRUE;
  }

  return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
}

/*
 * Called at start of full collections.  Not called if zero.
 * Called with the allocator lock held.  Not used by the collector itself.
 */
/* `STATIC` */ GC_start_callback_proc GC_start_call_back = 0;

GC_API void GC_CALL
GC_set_start_callback(GC_start_callback_proc fn)
{
  LOCK();
  GC_start_call_back = fn;
  UNLOCK();
}

GC_API GC_start_callback_proc GC_CALL
GC_get_start_callback(void)
{
  GC_start_callback_proc fn;

  READER_LOCK();
  fn = GC_start_call_back;
  READER_UNLOCK();
  return fn;
}

GC_INLINE void
GC_notify_full_gc(void)
{
  if (GC_start_call_back != 0) {
    (*GC_start_call_back)();
  }
}

STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
STATIC void GC_finish_collection(void);

/*
 * Initiate a garbage collection if appropriate.  Choose judiciously
 * between partial, full, and stop-world collections.
 */
STATIC void
GC_maybe_gc(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  ASSERT_CANCEL_DISABLED();
  if (!GC_should_collect())
    return;

  if (!GC_incremental) {
    GC_gcollect_inner();
    return;
  }

  GC_ASSERT(!GC_collection_in_progress());
#ifdef PARALLEL_MARK
  if (GC_parallel)
    GC_wait_for_reclaim();
#endif
  if (GC_need_full_gc || GC_n_partial_gcs >= GC_full_freq) {
    GC_COND_LOG_PRINTF(
        "***>Full mark for collection #%lu after %lu bytes allocated\n",
        (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
    GC_notify_full_gc();
    ENTER_GC();
    GC_promote_black_lists();
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
    GC_clear_marks();
    EXIT_GC();
    GC_n_partial_gcs = 0;
    GC_is_full_gc = TRUE;
  } else {
    GC_n_partial_gcs++;
  }

  /*
   * Try to mark with the world stopped.  If we run out of time, then this
   * turns into an incremental marking.
   */
#ifndef NO_CLOCK
  if (GC_time_limit != GC_TIME_UNLIMITED)
    GET_TIME(GC_start_time);
#endif
  if (GC_stopped_mark(GC_timeout_stop_func)) {
    SAVE_CALLERS_TO_LAST_STACK();
    GC_finish_collection();
  } else if (!GC_is_full_gc) {
    /* Count this as the first attempt. */
    GC_n_attempts++;
  }
}

STATIC GC_on_collection_event_proc GC_on_collection_event = 0;

GC_API void GC_CALL
GC_set_on_collection_event(GC_on_collection_event_proc fn)
{
  /* `fn` may be 0 (means no event notifier). */
  LOCK();
  GC_on_collection_event = fn;
  UNLOCK();
}

GC_API GC_on_collection_event_proc GC_CALL
GC_get_on_collection_event(void)
{
  GC_on_collection_event_proc fn;

  READER_LOCK();
  fn = GC_on_collection_event;
  READER_UNLOCK();
  return fn;
}

GC_INNER GC_bool
GC_try_to_collect_inner(GC_stop_func stop_func)
{
#ifndef NO_CLOCK
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
  GC_bool start_time_valid;
#endif

  ASSERT_CANCEL_DISABLED();
  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  if (GC_dont_gc || (*stop_func)())
    return FALSE;
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_START);
  if (GC_incremental && GC_collection_in_progress()) {
    GC_COND_LOG_PRINTF(
        "GC_try_to_collect_inner: finishing collection in progress\n");
    /* Just finish collection already in progress. */
    do {
      if ((*stop_func)()) {
        /* TODO: Notify `GC_EVENT_ABANDON`. */
        return FALSE;
      }
      GC_collect_a_little_inner(1);
    } while (GC_collection_in_progress());
  }
  GC_notify_full_gc();
#ifndef NO_CLOCK
  start_time_valid = FALSE;
  if ((GC_print_stats | (int)GC_measure_performance) != 0) {
    if (GC_print_stats)
      GC_log_printf("Initiating full world-stop collection!\n");
    start_time_valid = TRUE;
    GET_TIME(start_time);
  }
#endif
  GC_promote_black_lists();
  /*
   * Make sure all blocks have been reclaimed, so sweep routines do not
   * see cleared mark bits.  If we are guaranteed to finish, then this
   * is unnecessary.  In the find-leak case, we have to finish to
   * guarantee that previously unmarked objects are not reported as leaks.
   */
#ifdef PARALLEL_MARK
  if (GC_parallel)
    GC_wait_for_reclaim();
#endif
  ENTER_GC();
  if ((GC_find_leak_inner || stop_func != GC_never_stop_func)
      && !GC_reclaim_all(stop_func, FALSE)) {
    /* Aborted.  So far everything is still consistent. */
    EXIT_GC();
    /* TODO: Notify `GC_EVENT_ABANDON`. */
    return FALSE;
  }
  GC_invalidate_mark_state(); /*< flush mark stack */
  GC_clear_marks();
  SAVE_CALLERS_TO_LAST_STACK();
  GC_is_full_gc = TRUE;
  EXIT_GC();
  if (!GC_stopped_mark(stop_func)) {
    if (!GC_incremental) {
      /*
       * We are partially done and have no way to complete or use
       * current work.  Reestablish invariants as cheaply as possible.
       */
      GC_invalidate_mark_state();
      GC_unpromote_black_lists();
    } else {
      /*
       * We claim the world is already (or still) consistent.
       * We will finish incrementally.
       */
    }
    /* TODO: Notify `GC_EVENT_ABANDON`. */
    return FALSE;
  }
  GC_finish_collection();
#ifndef NO_CLOCK
  if (start_time_valid) {
    CLOCK_TYPE current_time;
    unsigned long time_diff, ns_frac_diff;

    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time, start_time);
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
    if (GC_measure_performance) {
      GC_full_gc_total_time += time_diff; /*< may wrap */
      GC_full_gc_total_ns_frac += (unsigned32)ns_frac_diff;
      if (GC_full_gc_total_ns_frac >= (unsigned32)1000000UL) {
        /* Overflow of the nanoseconds part. */
        GC_full_gc_total_ns_frac -= (unsigned32)1000000UL;
        GC_full_gc_total_time++;
      }
    }
    if (GC_print_stats)
      GC_log_printf("Complete collection took %lu ms %lu ns\n", time_diff,
                    ns_frac_diff);
  }
#endif
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_END);
  return TRUE;
}

/* The default value of `GC_rate`. */
#ifndef GC_RATE
#  define GC_RATE 10
#endif

/*
 * When `GC_collect_a_little_inner()` performs `n_blocks` units of garbage
 * collection work, a unit is intended to touch roughly `GC_rate` pages.
 * (But, every once in a while, we do more than that.)  This needs to be
 * a fairly large number with our current incremental collection strategy,
 * since otherwise we allocate too much during garbage collection, and
 * the cleanup gets expensive.
 */
STATIC unsigned GC_rate = GC_RATE;

GC_API void GC_CALL
GC_set_rate(int value)
{
  GC_ASSERT(value > 0);
  GC_rate = (unsigned)value;
}

GC_API int GC_CALL
GC_get_rate(void)
{
  return (int)GC_rate;
}
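
/*
 * For instance (illustrative only): with the default `GC_RATE` of 10,
 * a single `GC_collect_a_little_inner(1)` call sets its `max_deficit`
 * to 10 * 1 and thus performs up to roughly ten page-sized units of
 * marking work before returning control to the mutator.
 */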

/* The default maximum number of prior attempts at world stop marking. */
#ifndef MAX_PRIOR_ATTEMPTS
#  define MAX_PRIOR_ATTEMPTS 3
#endif

/*
 * The maximum number of prior attempts at world stop marking.
 * A value of 1 means that we finish the second time, no matter how long
 * it takes.  Does not count the initial root scan for a full collection.
 */
static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;

GC_API void GC_CALL
GC_set_max_prior_attempts(int value)
{
  GC_ASSERT(value >= 0);
  max_prior_attempts = value;
}

GC_API int GC_CALL
GC_get_max_prior_attempts(void)
{
  return max_prior_attempts;
}

GC_INNER void
GC_collect_a_little_inner(size_t n_blocks)
{
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  DISABLE_CANCEL(cancel_state);
  if (GC_incremental && GC_collection_in_progress()) {
    size_t i;
    size_t max_deficit = GC_rate * n_blocks;

    ENTER_GC();
#ifdef PARALLEL_MARK
    if (GC_time_limit != GC_TIME_UNLIMITED)
      GC_parallel_mark_disabled = TRUE;
#endif
    for (i = GC_mark_deficit; i < max_deficit; i++) {
      if (GC_mark_some(NULL))
        break;
    }
#ifdef PARALLEL_MARK
    GC_parallel_mark_disabled = FALSE;
#endif
    EXIT_GC();

    if (i < max_deficit && !GC_dont_gc) {
      GC_ASSERT(!GC_collection_in_progress());
      /* Need to follow up with a full collection. */
      SAVE_CALLERS_TO_LAST_STACK();
#ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#endif
#ifndef NO_CLOCK
      if (GC_time_limit != GC_TIME_UNLIMITED
          && GC_n_attempts < max_prior_attempts)
        GET_TIME(GC_start_time);
#endif
      if (GC_stopped_mark(GC_n_attempts < max_prior_attempts
                              ? GC_timeout_stop_func
                              : GC_never_stop_func)) {
        GC_finish_collection();
      } else {
        GC_n_attempts++;
      }
    }
    if (GC_mark_deficit > 0) {
      GC_mark_deficit
          = GC_mark_deficit > max_deficit ? GC_mark_deficit - max_deficit : 0;
    }
  } else if (!GC_dont_gc) {
    GC_maybe_gc();
  }
  RESTORE_CANCEL(cancel_state);
}

#if !defined(NO_FIND_LEAK) || !defined(SHORT_DBG_HDRS)
GC_INNER void (*GC_check_heap)(void) = 0;
GC_INNER void (*GC_print_all_smashed)(void) = 0;
#endif

GC_API int GC_CALL
GC_collect_a_little(void)
{
  int result;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  LOCK();
  /*
   * Note: if the collection is in progress, this may do marking (not
   * stopping the world) even in case of disabled garbage collection.
   */
  GC_collect_a_little_inner(1);
  result = (int)GC_collection_in_progress();
  UNLOCK();
  if (GC_debugging_started && !result)
    GC_print_all_smashed();
  return result;
}
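
/*
 * A hedged client-side sketch (not part of this file): since
 * `GC_collect_a_little()` returns nonzero while collection work
 * remains, an application can drain pending incremental work during
 * its idle moments along these lines.
 *
 * ```
 *   while (app_is_idle() && GC_collect_a_little()) {
 *     // keep doing small increments of GC work
 *   }
 * ```
 *
 * Here `app_is_idle()` is a hypothetical application predicate.
 */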

#ifdef THREADS
GC_API void GC_CALL
GC_stop_world_external(void)
{
  GC_ASSERT(GC_is_initialized);
  LOCK();
#  ifdef THREAD_LOCAL_ALLOC
  GC_ASSERT(!GC_world_stopped);
#  endif
  ENTER_GC();
  STOP_WORLD();
#  ifdef THREAD_LOCAL_ALLOC
  GC_world_stopped = TRUE;
#  endif
}

GC_API void GC_CALL
GC_start_world_external(void)
{
#  ifdef THREAD_LOCAL_ALLOC
  GC_ASSERT(GC_world_stopped);
  GC_world_stopped = FALSE;
#  else
  GC_ASSERT(GC_is_initialized);
#  endif
  START_WORLD();
  EXIT_GC();
  UNLOCK();
}
#endif /* THREADS */

#ifdef USE_MUNMAP
#  ifndef MUNMAP_THRESHOLD
#    define MUNMAP_THRESHOLD 7
#  endif
GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;

#  define IF_USE_MUNMAP(x) x
#  define COMMA_IF_USE_MUNMAP(x) /* comma */ , x
#else
#  define IF_USE_MUNMAP(x)
#  define COMMA_IF_USE_MUNMAP(x)
#endif /* !USE_MUNMAP */

/*
 * We stop the world and mark from all roots.  If `stop_func()` ever
 * returns `TRUE`, we may fail and return `FALSE`.  Increment `GC_gc_no`
 * if we succeed.
 */
STATIC GC_bool
GC_stopped_mark(GC_stop_func stop_func)
{
  ptr_t cold_gc_frame = GC_approx_sp();
  unsigned abandoned_at;
#ifndef NO_CLOCK
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
  GC_bool start_time_valid = FALSE;
#endif

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  ENTER_GC();
#if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
  GC_add_current_malloc_heap();
#endif
#if defined(REGISTER_LIBRARIES_EARLY)
  GC_cond_register_dynamic_libraries();
#endif

#if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
  GC_process_togglerefs();
#endif

  /* Output blank line for convenience here. */
  GC_COND_LOG_PRINTF(
      "\n--> Marking for collection #%lu after %lu allocated bytes\n",
      (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
#ifndef NO_CLOCK
  if (GC_PRINT_STATS_FLAG || GC_measure_performance) {
    GET_TIME(start_time);
    start_time_valid = TRUE;
  }
#endif
#ifdef THREADS
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
#endif
  STOP_WORLD();
#ifdef THREADS
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
#  ifdef THREAD_LOCAL_ALLOC
  GC_world_stopped = TRUE;
#  elif defined(CPPCHECK)
  /* Workaround a warning about adjacent same `if` condition. */
  (void)0;
#  endif
#endif

#ifdef MAKE_BACK_GRAPH
  if (GC_print_back_height) {
    GC_build_back_graph();
  }
#endif

  /* Notify about marking from all roots. */
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_MARK_START);

  /* Minimize junk left in my registers and on the stack. */
  GC_clear_a_few_frames();
  GC_noop6(0, 0, 0, 0, 0, 0);

  GC_initiate_gc();
#ifdef PARALLEL_MARK
  if (stop_func != GC_never_stop_func)
    GC_parallel_mark_disabled = TRUE;
#endif
  for (abandoned_at = 1; !(*stop_func)(); abandoned_at++) {
    if (GC_mark_some(cold_gc_frame)) {
#ifdef PARALLEL_MARK
      if (GC_parallel && GC_parallel_mark_disabled) {
        GC_COND_LOG_PRINTF("Stopped marking done after %u iterations"
                           " with disabled parallel marker\n",
                           abandoned_at - 1);
      }
#endif
      abandoned_at = 0;
      break;
    }
  }
#ifdef PARALLEL_MARK
  GC_parallel_mark_disabled = FALSE;
#endif

  if (abandoned_at > 0) {
    /* Give the mutator a chance. */
    GC_mark_deficit = abandoned_at - 1;
    /* TODO: Notify `GC_EVENT_MARK_ABANDON`. */
  } else {
    GC_gc_no++;
    /* Check all debugged objects for consistency. */
    if (GC_debugging_started)
      GC_check_heap();
    if (GC_on_collection_event)
      GC_on_collection_event(GC_EVENT_MARK_END);
  }

#ifdef THREADS
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
#endif
#ifdef THREAD_LOCAL_ALLOC
  GC_world_stopped = FALSE;
#endif
  START_WORLD();
#ifdef THREADS
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_POST_START_WORLD);
#endif

#ifndef NO_CLOCK
  if (start_time_valid) {
    CLOCK_TYPE current_time;
    unsigned long time_diff, ns_frac_diff;

    /* TODO: Avoid code duplication from `GC_try_to_collect_inner`. */
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time, start_time);
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
    if (GC_measure_performance) {
      GC_stopped_mark_total_time += time_diff; /*< may wrap */
      GC_stopped_mark_total_ns_frac += (unsigned32)ns_frac_diff;
      if (GC_stopped_mark_total_ns_frac >= (unsigned32)1000000UL) {
        GC_stopped_mark_total_ns_frac -= (unsigned32)1000000UL;
        GC_stopped_mark_total_time++;
      }
    }

    if (GC_PRINT_STATS_FLAG || GC_measure_performance) {
      unsigned total_time = GC_world_stopped_total_time;
      unsigned divisor = GC_world_stopped_total_divisor;

      /* Compute new world-stop delay total time. */
      if (total_time > (((unsigned)-1) >> 1)
          || divisor >= MAX_TOTAL_TIME_DIVISOR) {
        /* Halve values if overflow occurs. */
        total_time >>= 1;
        divisor >>= 1;
      }
      total_time += time_diff < (((unsigned)-1) >> 1) ? (unsigned)time_diff
                                                      : ((unsigned)-1) >> 1;
      /* Update old `GC_world_stopped_total_time` and its divisor. */
      GC_world_stopped_total_time = total_time;
      GC_world_stopped_total_divisor = ++divisor;
      if (GC_PRINT_STATS_FLAG && 0 == abandoned_at) {
        GC_ASSERT(divisor != 0);
        GC_log_printf(
            "World-stopped marking took %lu ms %lu ns (%u ms in average)\n",
            time_diff, ns_frac_diff, total_time / divisor);
      }
    }
  }
#endif

  EXIT_GC();
  if (0 == abandoned_at)
    return TRUE;
  GC_COND_LOG_PRINTF("Abandoned stopped marking after %u iterations\n",
                     abandoned_at - 1);
  return FALSE;
}

GC_INNER void
GC_set_fl_marks(ptr_t q)
{
#ifdef GC_ASSERTIONS
  ptr_t q2;
#endif
  struct hblk *h = HBLKPTR(q);
  const struct hblk *last_h = h;
  hdr *hhdr;
#ifdef MARK_BIT_PER_OBJ
  size_t sz;
#endif

  GC_ASSERT(q != NULL);
  hhdr = HDR(h);
#ifdef MARK_BIT_PER_OBJ
  sz = hhdr->hb_sz;
#endif
#ifdef GC_ASSERTIONS
  q2 = (ptr_t)obj_link(q);
#endif
  for (;;) {
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);

    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      set_mark_bit_from_hdr(hhdr, bit_no);
      INCR_MARKS(hhdr);
    }
    q = (ptr_t)obj_link(q);
    if (NULL == q)
      break;
#ifdef GC_ASSERTIONS
    /*
     * Detect a cycle in the free list.  The algorithm is to have
     * a "twice as fast" iterator over the list, which meets the slower
     * one if a cycle exists in the list.
     */
    if (q2 != NULL) {
      q2 = (ptr_t)obj_link(q2);
      GC_ASSERT(q2 != q);
      if (q2 != NULL) {
        q2 = (ptr_t)obj_link(q2);
        GC_ASSERT(q2 != q);
      }
    }
#endif

    h = HBLKPTR(q);
    if (UNLIKELY(h != last_h)) {
      last_h = h;
      /* Update `hhdr` and `sz`. */
      hhdr = HDR(h);
#ifdef MARK_BIT_PER_OBJ
      sz = hhdr->hb_sz;
#endif
    }
  }
}
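
/*
 * The cycle check above is Floyd's classic tortoise-and-hare scheme; a
 * hedged standalone sketch of the same idea over this file's
 * `obj_link()` lists:
 *
 * ```
 *   ptr_t slow = q, fast = (ptr_t)obj_link(q);
 *
 *   while (fast != NULL && (ptr_t)obj_link(fast) != NULL) {
 *     GC_ASSERT(fast != slow && (ptr_t)obj_link(fast) != slow);
 *     slow = (ptr_t)obj_link(slow);
 *     fast = (ptr_t)obj_link((ptr_t)obj_link(fast));
 *   }
 * ```
 */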

1125
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1126
/*
1127
 * Check that all mark bits for the free list, whose first entry is
1128
 * `*pfreelist`, are set.  The check is skipped if `*pfreelist` points to
1129
 * a special value.
1130
 */
1131
void
1132
GC_check_fl_marks(void **pfreelist)
24,747,936✔
1133
{
1134
  /*
1135
   * TODO: There is a data race with `GC_FAST_MALLOC_GRANS` (which does
1136
   * not do atomic updates to the free-list).  The race seems to be
1137
   * harmless, and for now we just skip this check in case of TSan.
1138
   */
1139
#  if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
1140
  ptr_t list = GC_cptr_load_acquire_read((volatile ptr_t *)pfreelist);
24,747,936✔
1141
  /* Atomic operations are used because the world is running. */
1142
  ptr_t p, prev, next;
1143

1144
  if (ADDR(list) <= HBLKSIZE)
24,747,936✔
1145
    return;
23,031,506✔
1146

1147
  prev = (ptr_t)pfreelist;
1,716,430✔
1148
  for (p = list; p != NULL; p = next) {
48,928,492✔
1149
    if (!GC_is_marked(p)) {
47,212,062✔
1150
      ABORT_ARG2("Unmarked local free-list entry", ": object %p on list %p",
×
1151
                 (void *)p, (void *)list);
1152
    }
1153

1154
    /*
1155
     * While traversing the free-list, it re-reads the pointer to the
1156
     * current node before accepting its next pointer and bails out
1157
     * if the latter has changed.  That way, it will not try to follow
1158
     * the pointer which might be been modified after the object was
1159
     * returned to the client.  It might perform the mark-check on the
1160
     * just allocated object but that should be harmless.
1161
     */
1162
    next = GC_cptr_load_acquire_read((volatile ptr_t *)p);
47,212,062✔
1163
    if (GC_cptr_load((volatile ptr_t *)prev) != p)
47,212,062✔
1164
      break;
×
1165
    prev = p;
47,212,062✔
1166
  }
1167
#  else
1168
  /* FIXME: Not implemented (just skipped). */
1169
  (void)pfreelist;
1170
#  endif
1171
}
1172
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1173

1174
/*
1175
 * Clear all mark bits for the free list (specified by the first entry).
1176
 * Decrement `GC_bytes_found` by number of bytes on free list.
1177
 */
1178
STATIC void
1179
GC_clear_fl_marks(ptr_t q)
175,343✔
1180
{
1181
  struct hblk *h = HBLKPTR(q);
175,343✔
1182
  const struct hblk *last_h = h;
175,343✔
1183
  hdr *hhdr = HDR(h);
175,343✔
1184
  size_t sz = hhdr->hb_sz; /*< normally set only once */
175,343✔
1185

1186
  for (;;) {
252,731,680✔
1187
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
252,907,023✔
1188

1189
    if (mark_bit_from_hdr(hhdr, bit_no)) {
252,907,023✔
1190
      size_t n_marks = hhdr->hb_n_marks;
9,093,421✔
1191

1192
#ifdef LINT2
1193
      if (0 == n_marks)
1194
        ABORT("hhdr->hb_n_marks cannot be zero");
1195
#else
1196
      GC_ASSERT(n_marks != 0);
9,093,421✔
1197
#endif
1198
      clear_mark_bit_from_hdr(hhdr, bit_no);
9,093,421✔
1199
      n_marks--;
9,093,421✔
1200
#ifdef PARALLEL_MARK
1201
      /* Approximate count, do not decrement to zero! */
1202
      if (n_marks != 0 || !GC_parallel) {
9,093,421✔
1203
        hhdr->hb_n_marks = n_marks;
9,085,096✔
1204
      }
1205
#else
1206
      hhdr->hb_n_marks = n_marks;
1207
#endif
1208
    }
1209
    GC_bytes_found -= (GC_signed_word)sz;
252,907,023✔
1210

1211
    q = (ptr_t)obj_link(q);
252,907,023✔
1212
    if (NULL == q)
252,907,023✔
1213
      break;
175,343✔
1214

1215
    h = HBLKPTR(q);
252,731,680✔
1216
    if (UNLIKELY(h != last_h)) {
252,731,680✔
1217
      last_h = h;
4,498,333✔
1218
      /* Update `hhdr` and `sz`. */
1219
      hhdr = HDR(h);
4,498,333✔
1220
      sz = hhdr->hb_sz;
4,498,333✔
1221
    }
1222
  }
1223
}
175,343✔
1224

1225
/* Mark all objects on the free lists for every object kind. */
1226
static void
1227
set_all_fl_marks(void)
66✔
1228
{
1229
  unsigned kind;
1230

1231
  for (kind = 0; kind < GC_n_kinds; kind++) {
330✔
1232
    size_t lg;
1233

1234
    for (lg = 1; lg <= MAXOBJGRANULES; lg++) {
34,056✔
1235
      ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[lg];
33,792✔
1236

1237
      if (q != NULL)
33,792✔
1238
        GC_set_fl_marks(q);
562✔
1239
    }
1240
  }
1241
}
66✔
1242

1243
/*
1244
 * Clear free-list mark bits.  Also subtract memory remaining from
1245
 * `GC_bytes_found` count.
1246
 */
1247
static void
1248
clear_all_fl_marks(void)
31,236✔
1249
{
1250
  unsigned kind;
1251

1252
  for (kind = 0; kind < GC_n_kinds; kind++) {
184,914✔
1253
    size_t lg;
1254

1255
    for (lg = 1; lg <= MAXOBJGRANULES; lg++) {
19,824,462✔
1256
      ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[lg];
19,670,784✔
1257

1258
      if (q != NULL)
19,670,784✔
1259
        GC_clear_fl_marks(q);
175,343✔
1260
    }
1261
  }
1262
}
31,236✔
1263

1264
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1265
void GC_check_tls(void);
1266
#endif
1267

1268
GC_on_heap_resize_proc GC_on_heap_resize = 0;
1269

1270
/* Used for logging only. */
1271
GC_INLINE int
1272
GC_compute_heap_usage_percent(void)
×
1273
{
1274
  word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
×
1275
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
×
1276

1277
  return used >= heap_sz            ? 0
1278
         : used < GC_WORD_MAX / 100 ? (int)((used * 100) / heap_sz)
×
1279
                                    : (int)(used / (heap_sz / 100));
×
1280
}
1281

1282
#define GC_DBGLOG_PRINT_HEAP_IN_USE()                                        \
1283
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1284
                   GC_compute_heap_usage_percent(),                          \
1285
                   TO_KiB_UL(GC_composite_in_use),                           \
1286
                   TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1287

1288
/*
1289
 * Finish up a collection.  Assumes mark bits are consistent, but the
1290
 * world is otherwise running.
1291
 */
1292
STATIC void
1293
GC_finish_collection(void)
31,236✔
1294
{
1295
#ifndef NO_CLOCK
1296
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
31,236✔
1297
  CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
31,236✔
1298
#endif
1299

1300
  GC_ASSERT(I_HOLD_LOCK());
31,236✔
1301
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC) \
1302
    && !defined(DBG_HDRS_ALL)
1303
  /* Check that we marked some of our own data. */
1304
  GC_check_tls();
31,236✔
1305
  /* TODO: Add more checks. */
1306
#endif
1307

1308
#ifndef NO_CLOCK
1309
  if (GC_print_stats)
31,236✔
1310
    GET_TIME(start_time);
×
1311
#endif
1312
  if (GC_on_collection_event)
31,236✔
1313
    GC_on_collection_event(GC_EVENT_RECLAIM_START);
×
1314

1315
#ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1316
  if (GC_bytes_found > 0)
31,236✔
1317
    GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
30,204✔
1318
#endif
1319
  GC_bytes_found = 0;
31,236✔
1320
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1321
  if (GETENV("GC_PRINT_ADDRESS_MAP") != NULL) {
31,236✔
1322
    GC_print_address_map();
×
1323
  }
1324
#endif
1325
  COND_DUMP;
31,236✔
1326
  if (GC_find_leak_inner) {
31,236✔
1327
    set_all_fl_marks();
66✔
1328
    /* This just checks; it does not really reclaim anything. */
1329
    GC_start_reclaim(TRUE);
66✔
1330
  }
1331

1332
#ifndef GC_NO_FINALIZATION
1333
  GC_finalize();
31,236✔
1334
#endif
1335
#ifndef NO_CLOCK
1336
  if (GC_print_stats)
31,236✔
1337
    GET_TIME(finalize_time);
×
1338
#endif
1339
#ifdef MAKE_BACK_GRAPH
1340
  if (GC_print_back_height) {
1341
    GC_traverse_back_graph();
1342
  }
1343
#endif
1344

1345
  /*
1346
   * Clear free-list mark bits, in case they got accidentally marked
1347
   * (or `GC_find_leak` is set and they were intentionally marked).
1348
   * Note that composite objects on free list are cleared, thus
1349
   * accidentally marking a free list is not a problem; but some objects
1350
   * on the list itself might be marked, and the given function call
1351
   * fixes it.
1352
   */
1353
  clear_all_fl_marks();
31,236✔
1354

1355
  GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
31,236✔
1356
                        (long)GC_bytes_found);
×
1357

1358
  /* Reconstruct free lists to contain everything not marked. */
1359
  GC_start_reclaim(FALSE);
31,236✔
1360

1361
#ifdef USE_MUNMAP
1362
  if (GC_unmap_threshold > 0    /*< memory unmapping enabled? */
31,236✔
1363
      && LIKELY(GC_gc_no != 1)) /*< do not unmap during `GC_init` */
31,236✔
1364
    GC_unmap_old(GC_unmap_threshold);
31,197✔
1365

1366
  GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
31,236✔
1367
#endif
1368
  GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
31,236✔
1369
  GC_DBGLOG_PRINTF(
31,236✔
1370
      "GC #%lu freed %ld bytes, heap %lu KiB (" IF_USE_MUNMAP(
1371
          "+ %lu KiB unmapped ") "+ %lu KiB internal)\n",
1372
      (unsigned long)GC_gc_no, (long)GC_bytes_found,
×
1373
      TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
×
1374
      COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
×
1375
      TO_KiB_UL(GC_our_mem_bytes - GC_heapsize + sizeof(GC_arrays)));
×
1376
  GC_DBGLOG_PRINT_HEAP_IN_USE();
31,236✔
1377
  if (GC_is_full_gc) {
31,236✔
1378
    GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
13,544✔
1379
    GC_need_full_gc = FALSE;
13,544✔
1380
  } else {
1381
    GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
35,384✔
1382
                      > min_bytes_allocd() + GC_large_free_bytes;
17,692✔
1383
  }
1384

1385
  /* Reset or increment counters for next cycle. */
1386
  GC_n_attempts = 0;
31,236✔
1387
  GC_is_full_gc = FALSE;
31,236✔
1388
  GC_bytes_allocd_before_gc += GC_bytes_allocd;
31,236✔
1389
  GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
31,236✔
1390
  GC_bytes_allocd = 0;
31,236✔
1391
  GC_bytes_dropped = 0;
31,236✔
1392
  GC_bytes_freed = 0;
31,236✔
1393
  GC_finalizer_bytes_freed = 0;
31,236✔
1394

1395
  if (GC_on_collection_event)
31,236✔
1396
    GC_on_collection_event(GC_EVENT_RECLAIM_END);
×
1397
#ifndef NO_CLOCK
1398
  if (GC_print_stats) {
31,236✔
1399
    CLOCK_TYPE done_time;
1400

1401
    GET_TIME(done_time);
×
1402
#  if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1403
    /* A convenient place to output finalization statistics. */
1404
    GC_print_finalization_stats();
×
1405
#  endif
1406
    GC_log_printf(
×
1407
        "Finalize and initiate sweep took %lu ms %lu ns + %lu ms %lu ns\n",
1408
        MS_TIME_DIFF(finalize_time, start_time),
×
1409
        NS_FRAC_TIME_DIFF(finalize_time, start_time),
×
1410
        MS_TIME_DIFF(done_time, finalize_time),
×
1411
        NS_FRAC_TIME_DIFF(done_time, finalize_time));
×
1412
  }
1413
#elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1414
  if (GC_print_stats)
1415
    GC_print_finalization_stats();
1416
#endif
1417
}
31,236✔
1418

1419
/* Note: if `stop_func` is 0, then `GC_default_stop_func` is used instead. */
1420
STATIC GC_bool
1421
GC_try_to_collect_general(GC_stop_func stop_func, GC_bool force_unmap)
3,652✔
1422
{
1423
  GC_bool result;
1424
#ifdef USE_MUNMAP
1425
  unsigned old_unmap_threshold;
1426
#endif
1427
  IF_CANCEL(int cancel_state;)
1428

1429
  if (UNLIKELY(!GC_is_initialized))
3,652✔
1430
    GC_init();
×
1431
  if (GC_debugging_started)
3,652✔
1432
    GC_print_all_smashed();
190✔
1433
  GC_notify_or_invoke_finalizers();
3,652✔
1434
  LOCK();
3,652✔
1435
  if (force_unmap) {
3,652✔
1436
    /*
1437
     * Record current heap size to make heap growth more conservative
1438
     * afterwards (as if the heap is growing from zero size again).
1439
     */
1440
    GC_heapsize_at_forced_unmap = GC_heapsize;
105✔
1441
  }
1442
  DISABLE_CANCEL(cancel_state);
3,652✔
1443
#ifdef USE_MUNMAP
1444
  old_unmap_threshold = GC_unmap_threshold;
3,652✔
1445
  if (force_unmap || (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
3,652✔
1446
    GC_unmap_threshold = 1; /*< unmap as much as possible */
105✔
1447
#endif
1448
  /* Minimize junk left in my registers. */
1449
  GC_noop6(0, 0, 0, 0, 0, 0);
3,652✔
1450
  result = GC_try_to_collect_inner(stop_func != 0 ? stop_func
3,652✔
1451
                                                  : GC_default_stop_func);
1452
#ifdef USE_MUNMAP
1453
  /* Restore it. */
1454
  GC_unmap_threshold = old_unmap_threshold;
3,652✔
1455
#endif
1456
  RESTORE_CANCEL(cancel_state);
3,652✔
1457
  UNLOCK();
3,652✔
1458
  if (result) {
3,652✔
1459
    if (GC_debugging_started)
3,606✔
1460
      GC_print_all_smashed();
190✔
1461
    GC_notify_or_invoke_finalizers();
3,606✔
1462
  }
1463
  return result;
3,652✔
1464
}
1465

1466
/* Externally callable routines to invoke full, stop-the-world collection. */
1467

1468
GC_API int GC_CALL
1469
GC_try_to_collect(GC_stop_func stop_func)
×
1470
{
1471
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
×
1472
  return (int)GC_try_to_collect_general(stop_func, FALSE);
×
1473
}
1474

1475
GC_API void GC_CALL
1476
GC_gcollect(void)
3,547✔
1477
{
1478
  /*
1479
   * Zero is passed as stop_func to get `GC_default_stop_func` value
1480
   * while holding the allocator lock (to prevent data race).
1481
   */
1482
  (void)GC_try_to_collect_general(0, FALSE);
3,547✔
1483
  if (get_have_errors())
3,547✔
1484
    GC_print_all_errors();
55✔
1485
}
3,547✔
1486

1487
GC_API void GC_CALL
1488
GC_gcollect_and_unmap(void)
105✔
1489
{
1490
  /* Collect and force memory unmapping to OS. */
1491
  (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
105✔
1492
}
105✔
1493

1494
STATIC GC_on_os_get_mem_proc GC_on_os_get_mem = 0;
1495

1496
GC_API void GC_CALL
1497
GC_set_on_os_get_mem(GC_on_os_get_mem_proc fn)
3✔
1498
{
1499
  /* `fn` may be 0 (means no event notifier). */
1500
  LOCK();
3✔
1501
  GC_on_os_get_mem = fn;
3✔
1502
  UNLOCK();
3✔
1503
}
3✔
1504

1505
GC_API GC_on_os_get_mem_proc GC_CALL
1506
GC_get_on_os_get_mem(void)
3✔
1507
{
1508
  GC_on_os_get_mem_proc fn;
1509

1510
  READER_LOCK();
3✔
1511
  fn = GC_on_os_get_mem;
3✔
1512
  READER_UNLOCK();
3✔
1513
  return fn;
3✔
1514
}
1515

1516
GC_INNER ptr_t
1517
GC_os_get_mem(size_t bytes)
2,530✔
1518
{
1519
  ptr_t space;
1520

1521
  GC_ASSERT(I_HOLD_LOCK());
2,530✔
1522
  space = (ptr_t)GET_MEM(bytes); /*< `HBLKSIZE`-aligned */
2,530✔
1523
  if (GC_on_os_get_mem)
2,530✔
1524
    (*GC_on_os_get_mem)(space, bytes);
×
1525
  if (UNLIKELY(NULL == space))
2,530✔
1526
    return NULL;
×
1527
#ifdef USE_PROC_FOR_LIBRARIES
1528
  /* Add `HBLKSIZE`-aligned `GET_MEM`-generated block to `GC_our_memory`. */
1529
  if (GC_n_memory >= MAX_HEAP_SECTS)
1530
    ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1531
  GC_our_memory[GC_n_memory].hs_start = space;
1532
  GC_our_memory[GC_n_memory].hs_bytes = bytes;
1533
  GC_n_memory++;
1534
#endif
1535
  GC_our_mem_bytes += bytes;
2,530✔
1536
  GC_VERBOSE_LOG_PRINTF("Got %lu bytes from OS\n", (unsigned long)bytes);
2,530✔
1537
  return space;
2,530✔
1538
}
1539

1540
/*
1541
 * Use the chunk of memory starting at `h` of size `sz` as part of the heap.
1542
 * Assumes `h` is `HBLKSIZE`-aligned, `sz` is a multiple of `HBLKSIZE`.
1543
 */
1544
STATIC void
1545
GC_add_to_heap(struct hblk *h, size_t sz)
916✔
1546
{
1547
  hdr *hhdr;
1548
  ptr_t endp;
1549
  size_t old_capacity = 0;
916✔
1550
  void *old_heap_sects = NULL;
916✔
1551
#ifdef GC_ASSERTIONS
1552
  size_t i;
1553
#endif
1554

1555
  GC_ASSERT(I_HOLD_LOCK());
916✔
1556
  GC_ASSERT(ADDR(h) % HBLKSIZE == 0);
916✔
1557
  GC_ASSERT(sz % HBLKSIZE == 0);
916✔
1558
  GC_ASSERT(sz > 0);
916✔
1559
  GC_ASSERT(GC_all_nils != NULL);
916✔
1560

1561
  if (UNLIKELY(GC_n_heap_sects == GC_capacity_heap_sects)) {
916✔
1562
    /* Allocate new `GC_heap_sects` with sufficient capacity. */
1563
#ifndef INITIAL_HEAP_SECTS
1564
#  define INITIAL_HEAP_SECTS 32
1565
#endif
1566
    size_t new_capacity
49✔
1567
        = GC_n_heap_sects > 0 ? GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
49✔
1568
    void *new_heap_sects
1569
        = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
49✔
1570

1571
    if (NULL == new_heap_sects) {
49✔
1572
      /* Retry with smaller yet sufficient capacity. */
1573
      new_capacity = GC_n_heap_sects + INITIAL_HEAP_SECTS;
×
1574
      new_heap_sects
1575
          = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
×
1576
      if (NULL == new_heap_sects)
×
1577
        ABORT("Insufficient memory for heap sections");
×
1578
    }
1579
    old_capacity = GC_capacity_heap_sects;
    old_heap_sects = GC_heap_sects;
    /* Transfer `GC_heap_sects` contents to the newly allocated array. */
    if (GC_n_heap_sects > 0)
      BCOPY(old_heap_sects, new_heap_sects,
            GC_n_heap_sects * sizeof(struct HeapSect));
    GC_capacity_heap_sects = new_capacity;
    GC_heap_sects = (struct HeapSect *)new_heap_sects;
    GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
                       (unsigned long)new_capacity);
  }

  while (UNLIKELY(ADDR(h) <= HBLKSIZE)) {
    /* Cannot handle memory near address zero. */
    ++h;
    sz -= HBLKSIZE;
    if (0 == sz)
      return;
  }
  while (UNLIKELY(ADDR(h) >= GC_WORD_MAX - sz)) {
    /* Prevent overflow when calculating `endp`. */
    sz -= HBLKSIZE;
    if (0 == sz)
      return;
  }
  endp = (ptr_t)h + sz;

  hhdr = GC_install_header(h);
  if (UNLIKELY(NULL == hhdr)) {
    /*
     * This is extremely unlikely.  Cannot add it.  This will almost
     * certainly result in a `NULL` returned from the allocator, which
     * is entirely appropriate.
     */
    return;
  }
#ifdef GC_ASSERTIONS
  /* Ensure no intersection between sections. */
  for (i = 0; i < GC_n_heap_sects; i++) {
    ptr_t hs_start = GC_heap_sects[i].hs_start;
    ptr_t hs_end = hs_start + GC_heap_sects[i].hs_bytes;

    GC_ASSERT(!(ADDR_INSIDE((ptr_t)h, hs_start, hs_end)
                || (ADDR_LT(hs_start, endp) && ADDR_GE(hs_end, endp))
                || (ADDR_LT((ptr_t)h, hs_start) && ADDR_LT(hs_end, endp))));
  }
#endif
  GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)h;
  GC_heap_sects[GC_n_heap_sects].hs_bytes = sz;
  GC_n_heap_sects++;
  hhdr->hb_block = h;
  hhdr->hb_sz = sz;
  hhdr->hb_flags = 0;
  GC_freehblk(h);
  GC_heapsize += sz;

  if (ADDR_GE((ptr_t)GC_least_plausible_heap_addr, (ptr_t)h)
      || UNLIKELY(NULL == GC_least_plausible_heap_addr)) {
    /*
     * Making it a little smaller than necessary prevents us from
     * getting a false hit from the variable itself.  There is some
     * unintentional reflection here.
     */
    GC_least_plausible_heap_addr = (ptr_t)h - sizeof(ptr_t);
  }
  if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, endp)) {
    GC_greatest_plausible_heap_addr = endp;
  }
#ifdef SET_REAL_HEAP_BOUNDS
  if (ADDR(h) < GC_least_real_heap_addr
      || UNLIKELY(0 == GC_least_real_heap_addr))
    GC_least_real_heap_addr = ADDR(h) - sizeof(ptr_t);
  if (GC_greatest_real_heap_addr < ADDR(endp)) {
#  ifdef INCLUDE_LINUX_THREAD_DESCR
    /* Avoid heap intersection with the static data roots. */
    GC_exclude_static_roots_inner((ptr_t)h, endp);
#  endif
    GC_greatest_real_heap_addr = ADDR(endp);
  }
#endif
  GC_handle_protected_regions_limit();
  if (UNLIKELY(old_capacity > 0)) {
#ifndef GWW_VDB
    /*
     * Recycling may call `GC_add_to_heap()` again but should not cause
     * resizing of `GC_heap_sects`.
     */
    GC_scratch_recycle_no_gww(old_heap_sects,
                              old_capacity * sizeof(struct HeapSect));
#else
    /* TODO: Implement GWW-aware recycling as in `alloc_mark_stack`. */
    GC_noop1_ptr(old_heap_sects);
#endif
  }
}
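
/*
 * The two guard loops near the top of `GC_add_to_heap` clamp the incoming
 * `[h, h + sz)` range so that it stays away from the block at address zero
 * and cannot overflow when `endp` is computed.  A minimal standalone
 * sketch of the same clamping arithmetic, using plain `uintptr_t` instead
 * of the collector's `ADDR()` and `ptr_t` (the `clip_range` helper and the
 * 4 KiB block size are purely illustrative):
 *
 * ```
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   #define BLKSZ 4096u // stand-in for HBLKSIZE
 *
 *   // Returns the clipped length (0 means the range is unusable);
 *   // updates *start to the first usable block.
 *   static size_t clip_range(uintptr_t *start, size_t len) {
 *     while (len != 0 && *start <= BLKSZ) { // skip memory near address 0
 *       *start += BLKSZ;
 *       len -= BLKSZ;
 *     }
 *     while (len != 0 && *start >= UINTPTR_MAX - len) // avoid overflow
 *       len -= BLKSZ;
 *     return len;
 *   }
 *
 *   int main(void) {
 *     uintptr_t start = 0;
 *     size_t len = clip_range(&start, 4 * BLKSZ);
 *     printf("usable: %#lx..%#lx\n", (unsigned long)start,
 *            (unsigned long)(start + len));
 *     return 0;
 *   }
 * ```
 */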

#ifndef NO_DEBUGGING
void
GC_print_heap_sects(void)
{
  size_t i;

  GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
            (unsigned long)GC_heapsize /*, */
                COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

  for (i = 0; i < GC_n_heap_sects; i++) {
    ptr_t start = GC_heap_sects[i].hs_start;
    size_t len = GC_heap_sects[i].hs_bytes;
    unsigned nbl = 0;
#  ifndef NO_BLACK_LISTING
    struct hblk *h;

    for (h = (struct hblk *)start; ADDR_LT((ptr_t)h, start + len); h++) {
      if (GC_is_black_listed(h, HBLKSIZE))
        nbl++;
    }
#  endif
    GC_printf("Section %u from %p to %p %u/%lu blacklisted\n", (unsigned)i,
              (void *)start, (void *)&start[len], nbl,
              (unsigned long)divHBLKSZ(len));
  }
}
#endif /* !NO_DEBUGGING */
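
/*
 * The same totals are available to clients through the public API even in
 * builds where `GC_print_heap_sects` is compiled out; a minimal usage
 * sketch (the include path may be `gc.h` or `gc/gc.h` depending on how
 * the library is installed):
 *
 * ```
 *   #include <stdio.h>
 *   #include <gc/gc.h>
 *
 *   int main(void) {
 *     GC_INIT();
 *     (void)GC_MALLOC(1000); // force some heap setup
 *     printf("heap: %lu bytes, free: %lu bytes\n",
 *            (unsigned long)GC_get_heap_size(),
 *            (unsigned long)GC_get_free_bytes());
 *     return 0;
 *   }
 * ```
 */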

void *GC_least_plausible_heap_addr = MAKE_CPTR(GC_WORD_MAX);
void *GC_greatest_plausible_heap_addr = NULL;

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL
GC_set_max_heap_size(GC_word n)
{
  GC_max_heapsize = n;
}
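
/*
 * `GC_set_max_heap_size` is part of the public API; a brief usage sketch
 * (the 64 MiB cap is arbitrary, chosen only for illustration):
 *
 * ```
 *   #include <gc/gc.h>
 *
 *   int main(void) {
 *     GC_set_max_heap_size(64 << 20); // never grow beyond 64 MiB
 *     GC_INIT();
 *     // ... allocate with GC_MALLOC(); once the cap is reached,
 *     // expansion fails and allocations may return NULL.
 *     return 0;
 *   }
 * ```
 */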
word GC_max_retries = 0;

GC_INNER void
GC_scratch_recycle_inner(void *ptr, size_t sz)
{
  size_t page_offset;
  size_t displ = 0;
  size_t recycled_bytes;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == ptr)
    return;

  GC_ASSERT(sz != 0);
  GC_ASSERT(GC_page_size != 0);
  /* TODO: Assert correct memory flags if `GWW_VDB`. */
  page_offset = ADDR(ptr) & (GC_page_size - 1);
  if (page_offset != 0)
    displ = GC_page_size - page_offset;
  recycled_bytes = sz > displ ? (sz - displ) & ~(GC_page_size - 1) : 0;
  GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
                     (unsigned long)recycled_bytes, (unsigned long)sz, ptr);
  if (recycled_bytes > 0)
    GC_add_to_heap((struct hblk *)((ptr_t)ptr + displ), recycled_bytes);
}
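
/*
 * The rounding above keeps only whole OS pages of the scratch region:
 * `displ` advances to the first page boundary at or after `ptr`, and
 * `recycled_bytes` truncates the remainder down to a multiple of the page
 * size.  A standalone sketch of the same arithmetic (the 4096-byte page
 * and the sample address are assumptions for illustration):
 *
 * ```
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   int main(void) {
 *     const uintptr_t page_size = 4096;
 *     uintptr_t p = 0x10010; // 16 bytes past a page boundary
 *     size_t sz = 3 * 4096 + 100;
 *     size_t page_offset = p & (page_size - 1);
 *     size_t displ = page_offset != 0 ? page_size - page_offset : 0;
 *     size_t recycled = sz > displ ? (sz - displ) & ~(page_size - 1) : 0;
 *
 *     // Prints: recycle 8192 of 12388 bytes at 0x11000.
 *     printf("recycle %lu of %lu bytes at %#lx\n",
 *            (unsigned long)recycled, (unsigned long)sz,
 *            (unsigned long)(p + displ));
 *     return 0;
 *   }
 * ```
 */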

GC_INNER GC_bool
GC_expand_hp_inner(word n)
{
  size_t sz;
  struct hblk *space;
  /* Number of bytes by which we expect the heap to expand soon. */
  word expansion_slop;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_page_size != 0);
  if (0 == n)
    n = 1;
  sz = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
  GC_DBGLOG_PRINT_HEAP_IN_USE();
  if (GC_max_heapsize != 0
      && (GC_max_heapsize < (word)sz
          || GC_heapsize > GC_max_heapsize - (word)sz)) {
    /* Exceeded the self-imposed limit. */
    return FALSE;
  }
  space = (struct hblk *)GC_os_get_mem(sz);
  if (UNLIKELY(NULL == space)) {
    WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", sz >> 10);
    return FALSE;
  }
  GC_last_heap_growth_gc_no = GC_gc_no;
  GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                    TO_KiB_UL(GC_heapsize + sz),
                    (unsigned long)GC_bytes_allocd);

  /*
   * Adjust heap limits generously for black-listing to work better.
   * `GC_add_to_heap()` performs minimal adjustment needed for correctness.
   */
  expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
  if ((0 == GC_last_heap_addr && (ADDR(space) & SIGNB) == 0)
      || (GC_last_heap_addr != 0 && GC_last_heap_addr < ADDR(space))) {
    /* Assume the heap is growing up. */
    if (LIKELY(ADDR(space) < GC_WORD_MAX - (sz + expansion_slop))) {
      ptr_t new_limit = (ptr_t)space + sz + expansion_slop;

      if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, new_limit))
        GC_greatest_plausible_heap_addr = new_limit;
    }
  } else {
    /* Heap is growing down. */
    if (LIKELY(ADDR(space) > expansion_slop + sizeof(ptr_t))) {
      ptr_t new_limit = (ptr_t)space - expansion_slop - sizeof(ptr_t);

      if (ADDR_LT(new_limit, (ptr_t)GC_least_plausible_heap_addr))
        GC_least_plausible_heap_addr = new_limit;
    }
  }
  GC_last_heap_addr = ADDR(space);

  GC_add_to_heap(space, sz);
  if (GC_on_heap_resize)
    (*GC_on_heap_resize)(GC_heapsize);

  return TRUE;
}
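
/*
 * `GC_on_heap_resize`, invoked above, is the client-installable hook; it
 * runs with the allocator lock held, so handlers should stay cheap.
 * A minimal sketch registering one through the public setter (the handler
 * body is illustrative):
 *
 * ```
 *   #include <stdio.h>
 *   #include <gc/gc.h>
 *
 *   static void GC_CALLBACK on_resize(GC_word new_size) {
 *     fprintf(stderr, "heap resized to %lu bytes\n",
 *             (unsigned long)new_size);
 *   }
 *
 *   int main(void) {
 *     GC_INIT();
 *     GC_set_on_heap_resize(on_resize);
 *     for (int i = 0; i < 100000; ++i)
 *       (void)GC_MALLOC(64); // eventually forces expansion
 *     return 0;
 *   }
 * ```
 */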

GC_API int GC_CALL
GC_expand_hp(size_t bytes)
{
  size_t n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
  word old_heapsize;
#endif
  GC_bool result;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  LOCK();
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
  old_heapsize = GC_heapsize;
#endif
  result = GC_expand_hp_inner(n_blocks);
  if (result) {
    GC_requested_heapsize += bytes;
#ifndef NO_WARN_HEAP_GROW_WHEN_GC_DISABLED
    if (GC_dont_gc) {
      /* Do not call `WARN()` if the heap growth is intentional. */
      GC_ASSERT(GC_heapsize >= old_heapsize);
      GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
    }
#endif
  }
  UNLOCK();
  /*
   * Really returns a `GC_bool` value, but the function is externally
   * visible, so that is clumsy.
   */
  return (int)result;
}
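
/*
 * A typical use of `GC_expand_hp` is to pre-size the heap at startup so
 * that early allocations do not pay for repeated growth; a minimal sketch
 * (the 8 MiB figure is arbitrary):
 *
 * ```
 *   #include <gc/gc.h>
 *
 *   int main(void) {
 *     GC_INIT();
 *     if (!GC_expand_hp(8 << 20))
 *       return 1; // failed: OOM or the GC_set_max_heap_size() cap
 *     // ... allocate with GC_MALLOC() against the pre-grown heap ...
 *     return 0;
 *   }
 * ```
 */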

/*
 * The minimum value of the ratio of the number of bytes allocated since
 * the latest collection to the number of finalizers created since that
 * collection which triggers a collection instead of a heap expansion.
 * Has no effect in the incremental mode.
 */
#ifdef GC_ALLOCD_BYTES_PER_FINALIZER
STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
#else
STATIC word GC_allocd_bytes_per_finalizer = 10000;
#endif

GC_API void GC_CALL
GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}

GC_API GC_word GC_CALL
GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}
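
/*
 * A brief usage sketch of this tuning knob (the value is illustrative;
 * smaller values make a backlog of finalizable objects trigger
 * collections sooner):
 *
 * ```
 *   #include <gc/gc.h>
 *
 *   int main(void) {
 *     GC_INIT();
 *     // Prefer collecting once roughly 2500 bytes have been allocated
 *     // per finalizer registered since the last collection.
 *     GC_set_allocd_bytes_per_finalizer(2500);
 *     // ... register finalizers with GC_REGISTER_FINALIZER() ...
 *     return 0;
 *   }
 * ```
 */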

GC_INNER GC_bool
GC_collect_or_expand(word needed_blocks, unsigned flags, GC_bool retry)
{
  static word last_fo_entries, last_bytes_finalized;

  GC_bool gc_not_stopped = TRUE;
  word blocks_to_get;
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  DISABLE_CANCEL(cancel_state);
  if (!GC_incremental && !GC_dont_gc
      && ((GC_dont_expand && GC_bytes_allocd > 0)
          || (GC_fo_entries > last_fo_entries
              && (last_bytes_finalized | GC_bytes_finalized) != 0
              && (GC_fo_entries - last_fo_entries)
                         * GC_allocd_bytes_per_finalizer
                     > GC_bytes_allocd)
          || GC_should_collect())) {
    /*
     * Try to do a full collection using "default" `stop_func` (unless
     * nothing has been allocated since the latest collection or heap
     * expansion is disabled).
     */
    gc_not_stopped = GC_try_to_collect_inner(
        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry)
            ? GC_default_stop_func
            : GC_never_stop_func);
    if (gc_not_stopped || !retry) {
      /*
       * Either the collection has not been aborted or this is the
       * first attempt (in a loop).
       */
      last_fo_entries = GC_fo_entries;
      last_bytes_finalized = GC_bytes_finalized;
      RESTORE_CANCEL(cancel_state);
      return TRUE;
    }
  }

  blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                      / (HBLKSIZE * GC_free_space_divisor)
                  + needed_blocks;
  if (blocks_to_get > MAXHINCR) {
#ifdef NO_BLACK_LISTING
    UNUSED_ARG(flags);
    blocks_to_get = needed_blocks > MAXHINCR ? needed_blocks : MAXHINCR;
#else
    word slop;

    /*
     * Get the minimum required to make it likely that we can satisfy
     * the current request in the presence of black-listing.  This will
     * probably be bigger than `MAXHINCR`.
     */
    if ((flags & IGNORE_OFF_PAGE) != 0) {
      slop = 4;
    } else {
      slop = 2 * divHBLKSZ(BL_LIMIT);
      if (slop > needed_blocks)
        slop = needed_blocks;
    }
    if (needed_blocks + slop > MAXHINCR) {
      blocks_to_get = needed_blocks + slop;
    } else {
      blocks_to_get = MAXHINCR;
    }
#endif
    if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
      blocks_to_get = divHBLKSZ(GC_WORD_MAX);
  } else if (blocks_to_get < MINHINCR) {
    blocks_to_get = MINHINCR;
  }

  if (GC_max_heapsize > GC_heapsize) {
    word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
    if (blocks_to_get > max_get_blocks)
      blocks_to_get
          = max_get_blocks > needed_blocks ? max_get_blocks : needed_blocks;
  }

#ifdef USE_MUNMAP
  if (GC_unmap_threshold > 1) {
    /*
     * Return as much memory to the OS as possible before trying to
     * get memory from it.
     */
    GC_unmap_old(0);
  }
#endif
  if (!GC_expand_hp_inner(blocks_to_get)
      && (blocks_to_get == needed_blocks
          || !GC_expand_hp_inner(needed_blocks))) {
    if (!gc_not_stopped) {
      /* Do not increment `GC_alloc_fail_count` here (and no warning). */
      GC_gcollect_inner();
      GC_ASSERT(0 == GC_bytes_allocd);
    } else if (GC_alloc_fail_count++ < GC_max_retries) {
      WARN("Out of Memory!  Trying to continue...\n", 0);
      GC_gcollect_inner();
    } else {
#ifdef USE_MUNMAP
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#endif
#if !defined(SMALL_CONFIG) && (CPP_WORDSZ >= 32)
#  define MAX_HEAPSIZE_WARNED_IN_BYTES (5 << 20) /*< 5 MiB */

      if (GC_heapsize > (word)MAX_HEAPSIZE_WARNED_IN_BYTES) {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
             " Returning NULL!\n",
             (GC_heapsize - GC_unmapped_bytes) >> 20);
      } else
#endif
      /* else */ {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " bytes."
             " Returning NULL!\n",
             GC_heapsize - GC_unmapped_bytes);
      }
      RESTORE_CANCEL(cancel_state);
      return FALSE;
    }
  } else if (GC_alloc_fail_count > 0) {
    GC_COND_LOG_PRINTF("Memory available again...\n");
  }
  RESTORE_CANCEL(cancel_state);
  return TRUE;
}
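
/*
 * To make the sizing heuristic concrete, a worked example of the initial
 * `blocks_to_get` computation before the `MINHINCR`/`MAXHINCR` and
 * `GC_max_heapsize` clamping above (the 4 KiB block size, the 64 MiB
 * heap, and the divisor of 3 are assumed values chosen for illustration):
 *
 * ```
 *   #include <stdio.h>
 *
 *   int main(void) {
 *     const unsigned long hblksize = 4096, free_space_divisor = 3;
 *     unsigned long heapsize = 64ul << 20; // nothing force-unmapped
 *     unsigned long needed_blocks = 16;
 *     // Grow in proportion to the current heap, plus the request.
 *     unsigned long blocks_to_get
 *         = heapsize / (hblksize * free_space_divisor) + needed_blocks;
 *
 *     // Prints: blocks_to_get = 5477 (21908 KiB).
 *     printf("blocks_to_get = %lu (%lu KiB)\n", blocks_to_get,
 *            blocks_to_get * hblksize >> 10);
 *     return 0;
 *   }
 * ```
 */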

GC_INNER ptr_t
GC_allocobj(size_t lg, int kind)
{
#define MAX_ALLOCOBJ_RETRIES 3
  int retry_cnt = 0;
  void **flh = &GC_obj_kinds[kind].ok_freelist[lg];
#ifndef GC_DISABLE_INCREMENTAL
  GC_bool tried_minor = FALSE;
#endif

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(0 == lg))
    return NULL;

  while (NULL == *flh) {
    /*
     * Only a few iterations are expected at most; otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCOBJ_RETRIES)
      ABORT("Too many retries in GC_allocobj");
#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED && !GC_dont_gc) {
      /*
       * True incremental mode, not just generational.
       * Do our share of marking work.
       */
      GC_collect_a_little_inner(1);
    }
#endif
    /* Sweep blocks for objects of this size. */
    GC_ASSERT(!GC_is_full_gc || NULL == GC_obj_kinds[kind].ok_reclaim_list
              || NULL == GC_obj_kinds[kind].ok_reclaim_list[lg]);
    GC_continue_reclaim(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

    GC_new_hblk(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED && !tried_minor
        && !GC_dont_gc) {
      GC_collect_a_little_inner(1);
      tried_minor = TRUE;
      continue;
    }
#endif
    if (UNLIKELY(!GC_collect_or_expand(1, 0 /* `flags` */, retry_cnt > 0)))
      return NULL;
    retry_cnt++;
  }
  /* Successful allocation; reset failure count. */
  GC_alloc_fail_count = 0;
  return (ptr_t)(*flh);
}
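
/*
 * When the loop above gives up (i.e. `GC_collect_or_expand()` returns
 * false), the `NULL` result propagates out of the public allocation entry
 * points.  Clients can bound the retries and intercept the failure; a
 * minimal sketch using the public `GC_set_max_retries()` and
 * `GC_set_oom_fn()` (the handler body is illustrative):
 *
 * ```
 *   #include <stdio.h>
 *   #include <gc/gc.h>
 *
 *   static void *GC_CALLBACK oom_handler(size_t bytes) {
 *     fprintf(stderr, "GC failed to allocate %lu bytes\n",
 *             (unsigned long)bytes);
 *     return NULL; // let the allocation return NULL to the caller
 *   }
 *
 *   int main(void) {
 *     GC_INIT();
 *     GC_set_oom_fn(oom_handler);
 *     GC_set_max_retries(2); // extra collections before giving up
 *     void *p = GC_MALLOC((size_t)1 << 20);
 *     return p != NULL ? 0 : 1;
 *   }
 * ```
 */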