ivmai / bdwgc / 1504

13 May 2023 10:04AM UTC coverage: 76.536% (-0.3%) from 76.817%
Build 1504 (push, via travis-ci-com, committed by ivmai)

Make comparisons to the lowest heap boundary strict

* alloc.c (GC_expand_hp_inner): Subtract sizeof(word) from new_limit
(for the case when comparing to GC_least_plausible_heap_addr).
* backgraph.c [MAKE_BACK_GRAPH] (add_back_edges): Compare current to
GC_least_real_heap_addr and GC_greatest_real_heap_addr (instead of
GC_least_plausible_heap_addr and GC_greatest_plausible_heap_addr).
* malloc.c [GC_ASSERTIONS] (GC_malloc_kind_global): Likewise.
* include/gc/gc_mark.h (GC_MARK_AND_PUSH): Replace greater-or-equal
with strictly greater when comparing to GC_least_plausible_heap_addr
(sketched below).
* include/private/gc_pmark.h (GC_PUSH_ONE_STACK, GC_PUSH_ONE_HEAP):
Likewise.
* include/private/gc_priv.h [MAKE_BACK_GRAPH] (SET_REAL_HEAP_BOUNDS,
GC_least_real_heap_addr, GC_greatest_real_heap_addr): Define.
* mark.c (GC_mark_from, GC_push_all): Add dummy "| GC_DS_LENGTH" when
storing length to mse_descr.w.
* mark.c (GC_mark_from): Replace greater-or-equal with strictly greater
when comparing to the least_ha variable.
* typd_mlc.c (GC_typed_mark_proc): Likewise.
* mark.c [GC_DS_TAGS>ALIGNMENT-1] (GC_push_all): Simplify code to
round up length.

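The gist of the comparison change, sketched as a hedged example (not the
exact macro text; see the headers listed above for the real definitions):
a candidate pointer p that previously passed the plausibility filter with

    (word)p >= (word)GC_least_plausible_heap_addr

must now satisfy

    (word)p > (word)GC_least_plausible_heap_addr

which is safe because GC_least_plausible_heap_addr is kept strictly below
the heap proper (GC_add_to_heap in alloc.c sets it to the lowest heap
address minus sizeof(word)).
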
10 of 10 new or added lines in 4 files covered. (100.0%)

7809 of 10203 relevant lines covered (76.54%)

8248744.64 hits per line

Source File

/alloc.c (79.1% covered)
1
/*
2
 * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
6
 * Copyright (c) 2008-2022 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 *
17
 */
18

19
#include "private/gc_priv.h"
20

21
#if !defined(MACOS) && !defined(MSWINCE)
22
# include <signal.h>
23
# if !defined(GC_NO_TYPES) && !defined(SN_TARGET_PSP2) \
24
     && !defined(__CC_ARM)
25
#   include <sys/types.h>
26
# endif
27
#endif
28

29
/*
30
 * Separate free lists are maintained for different sized objects
31
 * up to MAXOBJBYTES.
32
 * The call GC_allocobj(i,k) ensures that the freelist for
33
 * kind k objects of size i points to a non-empty
34
 * free list. It returns a pointer to the first entry on the free list.
35
 * In a single-threaded world, GC_allocobj may be called to allocate
36
 * an object of small size lb (and NORMAL kind) as follows
37
 * (GC_generic_malloc_inner is a wrapper over GC_allocobj which also
38
 * fills in GC_size_map if needed):
39
 *
40
 *   lg = GC_size_map[lb];
41
 *   op = GC_objfreelist[lg];
42
 *   if (NULL == op) {
43
 *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
44
 *   } else {
45
 *     GC_objfreelist[lg] = obj_link(op);
46
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
47
 *   }
48
 *
49
 * Note that this is very fast if the free list is non-empty; it should
50
 * only involve the execution of 4 or 5 simple instructions.
51
 * All composite objects on freelists are cleared, except for
52
 * their first word.
53
 */
54

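To make the fast path above concrete, here is a hedged, compilable-style
sketch; the wrapper name alloc_small_normal is hypothetical, while
GC_size_map, GC_objfreelist, obj_link, GRANULES_TO_BYTES, NORMAL and
GC_generic_malloc_inner are the names used in the comment:

/* Hypothetical wrapper illustrating the single-threaded fast path.    */
static void *alloc_small_normal(size_t lb)
{
  size_t lg = GC_size_map[lb];          /* size in granules            */
  void *op = GC_objfreelist[lg];        /* head of the free list       */

  if (NULL == op) {
    /* Slow path: refill the free list (fills GC_size_map if needed).  */
    op = GC_generic_malloc_inner(lb, NORMAL, 0);
  } else {
    GC_objfreelist[lg] = obj_link(op);  /* pop the first entry         */
    GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  }
  return op;
}
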
55
/*
56
 * The allocator uses GC_allochblk to allocate large chunks of objects.
57
 * These chunks all start on addresses which are multiples of
58
 * HBLKSZ.   Each allocated chunk has an associated header,
59
 * which can be located quickly based on the address of the chunk.
60
 * (See headers.c for details.)
61
 * This makes it possible to check quickly whether an
62
 * arbitrary address corresponds to an object administered by the
63
 * allocator.
64
 */
65

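As a hedged illustration of that header lookup (round_to_chunk is a
hypothetical stand-in; the collector's own HBLKPTR and HDR macros, used
throughout this file, do the real work):

/* Round an interior pointer down to its chunk, assuming chunks are    */
/* HBLKSIZE-aligned (the HBLKSZ of the comment) and HBLKSIZE is a      */
/* power of two.                                                       */
static struct hblk *round_to_chunk(void *p)
{
  return (struct hblk *)((word)p & ~(word)(HBLKSIZE - 1));
}
/* Then HDR(round_to_chunk(p)) yields the chunk's header.              */
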
66
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */
67

68
word GC_gc_no = 0;
69

70
#ifndef NO_CLOCK
71
  static unsigned long full_gc_total_time = 0; /* in ms, may wrap */
72
  static unsigned long stopped_mark_total_time = 0;
73
  static unsigned full_gc_total_ns_frac = 0; /* fraction of 1 ms */
74
  static unsigned stopped_mark_total_ns_frac = 0;
75
  static GC_bool measure_performance = FALSE;
76
                /* Do performance measurements if set to true (e.g.,    */
77
                /* accumulation of the total time of full collections). */
78

79
  GC_API void GC_CALL GC_start_performance_measurement(void)
2✔
80
  {
81
    measure_performance = TRUE;
2✔
82
  }
2✔
83

84
  GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void)
2✔
85
  {
86
    return full_gc_total_time;
2✔
87
  }
88

89
  GC_API unsigned long GC_CALL GC_get_stopped_mark_total_time(void)
2✔
90
  {
91
    return stopped_mark_total_time;
2✔
92
  }
93
#endif /* !NO_CLOCK */
94

95
#ifndef GC_DISABLE_INCREMENTAL
96
  GC_INNER GC_bool GC_incremental = FALSE; /* By default, stop the world. */
97
  STATIC GC_bool GC_should_start_incremental_collection = FALSE;
98
#endif
99

100
GC_API int GC_CALL GC_is_incremental_mode(void)
16✔
101
{
102
  return (int)GC_incremental;
16✔
103
}
104

105
#ifdef THREADS
106
  int GC_parallel = FALSE;      /* By default, parallel GC is off.      */
107
#endif
108

109
#if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
110
  int GC_full_freq = GC_FULL_FREQ;
111
#else
112
  int GC_full_freq = 19;   /* Every 20th collection is a full   */
113
                           /* collection, whether we need it    */
114
                           /* or not.                           */
115
#endif
116

117
STATIC GC_bool GC_need_full_gc = FALSE;
118
                           /* Need full GC due to heap growth.  */
119

120
#ifdef THREAD_LOCAL_ALLOC
121
  GC_INNER GC_bool GC_world_stopped = FALSE;
122
#endif
123

124
STATIC GC_bool GC_disable_automatic_collection = FALSE;
125

126
GC_API void GC_CALL GC_set_disable_automatic_collection(int value)
2✔
127
{
128
  LOCK();
2✔
129
  GC_disable_automatic_collection = (GC_bool)value;
2✔
130
  UNLOCK();
2✔
131
}
2✔
132

133
GC_API int GC_CALL GC_get_disable_automatic_collection(void)
2✔
134
{
135
  int value;
136

137
  LOCK();
2✔
138
  value = (int)GC_disable_automatic_collection;
2✔
139
  UNLOCK();
2✔
140
  return value;
2✔
141
}
142

143
STATIC word GC_used_heap_size_after_full = 0;
144

145
/* Version macros are now defined in gc_version.h, which is included by */
146
/* gc.h, which is included by gc_priv.h.                                */
147
#ifndef GC_NO_VERSION_VAR
148
  EXTERN_C_BEGIN
149
  extern const unsigned GC_version;
150
  EXTERN_C_END
151
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
152
                        (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
153
#endif
154

155
GC_API unsigned GC_CALL GC_get_version(void)
2✔
156
{
157
  return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
2✔
158
          GC_VERSION_MICRO;
159
}
160

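A short usage sketch of the version packing above (the sample value
0x080204 is assumed, standing for version 8.2.4):

/* Decode the packed version number returned by GC_get_version().      */
unsigned v = GC_get_version();
unsigned major = (v >> 16) & 0xffU;   /* e.g., 8 for 0x080204 */
unsigned minor = (v >> 8) & 0xffU;    /* e.g., 2 */
unsigned micro = v & 0xffU;           /* e.g., 4 */
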
161
/* some more variables */
162

163
#ifdef GC_DONT_EXPAND
164
  int GC_dont_expand = TRUE;
165
#else
166
  int GC_dont_expand = FALSE;
167
#endif
168

169
#if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
170
  word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /* must be > 0 */
171
#else
172
  word GC_free_space_divisor = 3;
173
#endif
174

175
GC_INNER int GC_CALLBACK GC_never_stop_func(void)
7,508,134✔
176
{
177
  return FALSE;
7,508,134✔
178
}
179

180
#if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
181
  unsigned long GC_time_limit = GC_TIME_LIMIT;
182
                           /* We try to keep pause times from exceeding  */
183
                           /* this by much. In milliseconds.             */
184
#elif defined(PARALLEL_MARK)
185
  unsigned long GC_time_limit = GC_TIME_UNLIMITED;
186
                        /* The parallel marker cannot be interrupted for */
187
                        /* now, so the time limit is absent by default.  */
188
#else
189
  unsigned long GC_time_limit = 15;
190
#endif
191

192
#ifndef NO_CLOCK
193
  STATIC unsigned long GC_time_lim_nsec = 0;
194
                        /* The nanoseconds add-on to GC_time_limit      */
195
                        /* value.  Not updated by GC_set_time_limit().  */
196
                        /* Ignored if the value of GC_time_limit is     */
197
                        /* GC_TIME_UNLIMITED.                           */
198

199
# define TV_NSEC_LIMIT (1000UL * 1000) /* number of nanoseconds in 1 ms */
200

201
  GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s tv)
2✔
202
  {
203
    GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
2✔
204
    GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
2✔
205
    GC_time_limit = tv.tv_ms;
2✔
206
    GC_time_lim_nsec = tv.tv_nsec;
2✔
207
  }
2✔
208

209
  GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void)
2✔
210
  {
211
    struct GC_timeval_s tv;
212

213
    tv.tv_ms = GC_time_limit;
2✔
214
    tv.tv_nsec = GC_time_lim_nsec;
2✔
215
    return tv;
2✔
216
  }
217

218
  STATIC CLOCK_TYPE GC_start_time = CLOCK_TYPE_INITIALIZER;
219
                                /* Time at which we stopped the world. */
220
                                /* Used only in GC_timeout_stop_func.  */
221
#endif /* !NO_CLOCK */
222

223
STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
224
                                /* collection within GC_time_limit.     */
225

226
STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
227
                                /* Accessed holding the allocator lock. */
228

229
GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
2✔
230
{
231
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
2✔
232
  LOCK();
2✔
233
  GC_default_stop_func = stop_func;
2✔
234
  UNLOCK();
2✔
235
}
2✔
236

237
GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
2✔
238
{
239
  GC_stop_func stop_func;
240

241
  LOCK();
2✔
242
  stop_func = GC_default_stop_func;
2✔
243
  UNLOCK();
2✔
244
  return stop_func;
2✔
245
}
246

247
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
248
# define GC_timeout_stop_func GC_default_stop_func
249
#else
250
  STATIC int GC_CALLBACK GC_timeout_stop_func(void)
6,605,572✔
251
  {
252
    CLOCK_TYPE current_time;
253
    static unsigned count = 0;
254
    unsigned long time_diff, nsec_diff;
255

256
    if (GC_default_stop_func())
6,605,572✔
257
      return TRUE;
6,605,572✔
258

259
    if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
6,605,572✔
260
      return FALSE;
6,605,572✔
261

262
    GET_TIME(current_time);
×
263
    time_diff = MS_TIME_DIFF(current_time, GC_start_time);
×
264
    nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
×
265
#   if defined(CPPCHECK)
266
      GC_noop1((word)&nsec_diff);
267
#   endif
268
    if (time_diff >= GC_time_limit
×
269
        && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
×
270
      GC_COND_LOG_PRINTF("Abandoning stopped marking after %lu ms %lu ns"
×
271
                         " (attempt %d)\n",
272
                         time_diff, nsec_diff, GC_n_attempts);
273
      return TRUE;
×
274
    }
275

276
    return FALSE;
×
277
  }
278
#endif /* !GC_DISABLE_INCREMENTAL */
279

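For comparison, a hedged sketch of a client-supplied stop function
(my_stop_func and deadline_reached are hypothetical) registered through
the GC_set_stop_func() API defined above:

/* Abort the current collection attempt once the application has       */
/* signaled that its deadline passed.                                   */
static volatile int deadline_reached = 0;

static int GC_CALLBACK my_stop_func(void)
{
  return deadline_reached;  /* nonzero means "stop marking now" */
}

/* ... during initialization: GC_set_stop_func(my_stop_func); ... */
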
280
#ifdef THREADS
281
  GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
282
#endif
283

284
static size_t min_bytes_allocd_minimum = 1;
285
                        /* The lowest value returned by min_bytes_allocd(). */
286

287
GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value)
2✔
288
{
289
    GC_ASSERT(value > 0);
2✔
290
    min_bytes_allocd_minimum = value;
2✔
291
}
2✔
292

293
GC_API size_t GC_CALL GC_get_min_bytes_allocd(void)
2✔
294
{
295
    return min_bytes_allocd_minimum;
2✔
296
}
297

298
/* Return the minimum number of bytes that must be allocated between    */
299
/* collections to amortize the collection cost.  Should be non-zero.    */
300
static word min_bytes_allocd(void)
44,934✔
301
{
302
    word result;
303
    word stack_size;
304
    word total_root_size;       /* includes double stack size,  */
305
                                /* since the stack is expensive */
306
                                /* to scan.                     */
307
    word scan_size;             /* Estimate of memory to be scanned     */
308
                                /* during normal GC.                    */
309

310
#   ifdef THREADS
311
      if (GC_need_to_lock) {
44,934✔
312
        /* We are multi-threaded... */
313
        stack_size = GC_total_stacksize;
18,819✔
314
        /* For now, we just use the value computed during the latest GC. */
315
#       ifdef DEBUG_THREADS
316
          GC_log_printf("Total stacks size: %lu\n",
317
                        (unsigned long)stack_size);
318
#       endif
319
      } else
320
#   endif
321
    /* else*/ {
322
#     ifdef STACK_NOT_SCANNED
323
        stack_size = 0;
324
#     elif defined(STACK_GROWS_UP)
325
        stack_size = GC_approx_sp() - GC_stackbottom;
326
#     else
327
        stack_size = GC_stackbottom - GC_approx_sp();
26,115✔
328
#     endif
329
    }
330

331
    total_root_size = 2 * stack_size + GC_root_size;
44,934✔
332
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
44,934✔
333
                + total_root_size;
334
    result = scan_size / GC_free_space_divisor;
44,934✔
335
    if (GC_incremental) {
44,934✔
336
      result /= 2;
34,617✔
337
    }
338
    return result > min_bytes_allocd_minimum
44,934✔
339
            ? result : min_bytes_allocd_minimum;
44,934✔
340
}
341

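A worked example of the heuristic above, under assumed figures (1 MiB of
stack, 2 MiB of static roots, 30 MiB composite and 8 MiB atomic objects
in use, default GC_free_space_divisor of 3):

/*   total_root_size = 2*1 + 2        =  4 MiB                          */
/*   scan_size       = 2*30 + 8/4 + 4 = 66 MiB                          */
/*   result          = 66 / 3         = 22 MiB (halved to 11 MiB when   */
/*                                      incremental GC is enabled).     */
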
342
STATIC word GC_non_gc_bytes_at_gc = 0;
343
                /* Number of explicitly managed bytes of storage        */
344
                /* at last collection.                                  */
345

346
/* Return the number of bytes allocated, adjusted for explicit storage  */
347
/* management, etc.  This number is used in deciding when to trigger    */
348
/* collections.                                                         */
349
STATIC word GC_adj_bytes_allocd(void)
8,651,657✔
350
{
351
    signed_word result;
352
    signed_word expl_managed = (signed_word)GC_non_gc_bytes
17,303,314✔
353
                                - (signed_word)GC_non_gc_bytes_at_gc;
8,651,657✔
354

355
    /* Don't count what was explicitly freed, or newly allocated for    */
356
    /* explicit management.  Note that deallocating an explicitly       */
357
    /* managed object should not alter result, assuming the client      */
358
    /* is playing by the rules.                                         */
359
    result = (signed_word)GC_bytes_allocd
17,303,314✔
360
             + (signed_word)GC_bytes_dropped
8,651,657✔
361
             - (signed_word)GC_bytes_freed
8,651,657✔
362
             + (signed_word)GC_finalizer_bytes_freed
8,651,657✔
363
             - expl_managed;
364
    if (result > (signed_word)GC_bytes_allocd) {
8,651,657✔
365
        result = GC_bytes_allocd;
1,393,504✔
366
        /* probably client bug or unfortunate scheduling */
367
    }
368
    result += GC_bytes_finalized;
8,651,657✔
369
        /* We count objects enqueued for finalization as though they    */
370
        /* had been reallocated this round. Finalization is user        */
371
        /* visible progress.  And if we don't count this, we have       */
372
        /* stability problems for programs that finalize all objects.   */
373
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
8,651,657✔
374
        /* Always count at least 1/8 of the allocations.  We don't want */
375
        /* to collect too infrequently, since that would inhibit        */
376
        /* coalescing of free storage blocks.                           */
377
        /* This also makes us partially robust against client bugs.     */
378
        result = (signed_word)(GC_bytes_allocd >> 3);
809✔
379
    }
380
    return (word)result;
8,651,657✔
381
}
382

383

384
/* Clear up a few frames worth of garbage left at the top of the stack. */
385
/* This is used to prevent us from accidentally treating garbage left   */
386
/* on the stack by other parts of the collector as roots.  This         */
387
/* differs from the code in misc.c, which actually tries to keep the    */
388
/* stack clear of long-lived, client-generated garbage.                 */
389
STATIC void GC_clear_a_few_frames(void)
29,390✔
390
{
391
#   ifndef CLEAR_NWORDS
392
#     define CLEAR_NWORDS 64
393
#   endif
394
    volatile word frames[CLEAR_NWORDS];
395
    BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
29,390✔
396
}
29,390✔
397

398
GC_API void GC_CALL GC_start_incremental_collection(void)
×
399
{
400
# ifndef GC_DISABLE_INCREMENTAL
401
    if (!GC_incremental) return;
×
402

403
    LOCK();
×
404
    GC_should_start_incremental_collection = TRUE;
×
405
    if (!GC_dont_gc) {
×
406
      ENTER_GC();
×
407
      GC_collect_a_little_inner(1);
×
408
      EXIT_GC();
×
409
    }
410
    UNLOCK();
×
411
# endif
412
}
413

414
/* Have we allocated enough to amortize a collection? */
415
GC_INNER GC_bool GC_should_collect(void)
8,657,431✔
416
{
417
    static word last_min_bytes_allocd;
418
    static word last_gc_no;
419

420
    GC_ASSERT(I_HOLD_LOCK());
8,657,431✔
421
    if (last_gc_no != GC_gc_no) {
8,657,431✔
422
      last_min_bytes_allocd = min_bytes_allocd();
28,916✔
423
      last_gc_no = GC_gc_no;
28,916✔
424
    }
425
# ifndef GC_DISABLE_INCREMENTAL
426
    if (GC_should_start_incremental_collection) {
8,657,431✔
427
      GC_should_start_incremental_collection = FALSE;
×
428
      return TRUE;
×
429
    }
430
# endif
431
    if (GC_disable_automatic_collection) return FALSE;
8,657,431✔
432

433
    if (GC_last_heap_growth_gc_no == GC_gc_no)
8,657,431✔
434
      return TRUE; /* avoid expanding past limits used by blacklisting  */
5,774✔
435

436
    return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
8,651,657✔
437
}
438

439
/* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
440
                        /* Called at start of full collections.         */
441
                        /* Not called if 0.  Called with the allocation */
442
                        /* lock held.  Not used by GC itself.           */
443

444
GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
2✔
445
{
446
    LOCK();
2✔
447
    GC_start_call_back = fn;
2✔
448
    UNLOCK();
2✔
449
}
2✔
450

451
GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
2✔
452
{
453
    GC_start_callback_proc fn;
454

455
    LOCK();
2✔
456
    fn = GC_start_call_back;
2✔
457
    UNLOCK();
2✔
458
    return fn;
2✔
459
}
460

461
GC_INLINE void GC_notify_full_gc(void)
13,825✔
462
{
463
    if (GC_start_call_back != 0) {
13,825✔
464
        (*GC_start_call_back)();
×
465
    }
466
}
13,825✔
467

468
STATIC GC_bool GC_is_full_gc = FALSE;
469

470
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
471
STATIC void GC_finish_collection(void);
472

473
/* Initiate a garbage collection if appropriate.  Choose judiciously    */
474
/* between partial, full, and stop-world collections.                   */
475
STATIC void GC_maybe_gc(void)
6,883,646✔
476
{
477
  static int n_partial_gcs = 0;
478

479
  GC_ASSERT(I_HOLD_LOCK());
6,883,646✔
480
  ASSERT_CANCEL_DISABLED();
6,883,646✔
481
  if (!GC_should_collect()) return;
6,883,646✔
482

483
  if (!GC_incremental) {
16,980✔
484
    GC_gcollect_inner();
×
485
    return;
×
486
  }
487

488
  GC_ASSERT(!GC_collection_in_progress());
16,980✔
489
# ifdef PARALLEL_MARK
490
    if (GC_parallel)
16,980✔
491
      GC_wait_for_reclaim();
8,923✔
492
# endif
493
  if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
16,980✔
494
    GC_COND_LOG_PRINTF(
1,415✔
495
                "***>Full mark for collection #%lu after %lu allocd bytes\n",
496
                (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
497
    GC_promote_black_lists();
1,415✔
498
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
1,415✔
499
    GC_notify_full_gc();
1,415✔
500
    GC_clear_marks();
1,415✔
501
    n_partial_gcs = 0;
1,415✔
502
    GC_is_full_gc = TRUE;
1,415✔
503
  } else {
504
    n_partial_gcs++;
15,565✔
505
  }
506

507
  /* Try to mark with the world stopped.  If we run out of      */
508
  /* time, this turns into an incremental marking.              */
509
# ifndef NO_CLOCK
510
    if (GC_time_limit != GC_TIME_UNLIMITED) GET_TIME(GC_start_time);
16,980✔
511
# endif
512
  if (GC_stopped_mark(GC_timeout_stop_func)) {
16,980✔
513
#   ifdef SAVE_CALL_CHAIN
514
      GC_save_callers(GC_last_stack);
515
#   endif
516
    GC_finish_collection();
16,982✔
517
  } else if (!GC_is_full_gc) {
×
518
    /* Count this as the first attempt. */
519
    GC_n_attempts++;
×
520
  }
521
}
522

523
STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
524

525
GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc fn)
2✔
526
{
527
    /* fn may be 0 (means no event notifier). */
528
    LOCK();
2✔
529
    GC_on_collection_event = fn;
2✔
530
    UNLOCK();
2✔
531
}
2✔
532

533
GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void)
2✔
534
{
535
    GC_on_collection_event_proc fn;
536

537
    LOCK();
2✔
538
    fn = GC_on_collection_event;
2✔
539
    UNLOCK();
2✔
540
    return fn;
2✔
541
}
542

543
/* Stop the world garbage collection.  If stop_func is not      */
544
/* GC_never_stop_func then abort if stop_func returns TRUE.     */
545
/* Return TRUE if we successfully completed the collection.     */
546
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
12,688✔
547
{
548
#   ifndef NO_CLOCK
549
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
12,688✔
550
      GC_bool start_time_valid;
551
#   endif
552

553
    ASSERT_CANCEL_DISABLED();
12,688✔
554
    GC_ASSERT(I_HOLD_LOCK());
12,688✔
555
    GC_ASSERT(GC_is_initialized);
12,688✔
556
    if (GC_dont_gc || (*stop_func)()) return FALSE;
25,376✔
557
    if (GC_on_collection_event)
12,410✔
558
      GC_on_collection_event(GC_EVENT_START);
×
559
    if (GC_incremental && GC_collection_in_progress()) {
12,410✔
560
      GC_COND_LOG_PRINTF(
×
561
            "GC_try_to_collect_inner: finishing collection in progress\n");
562
      /* Just finish collection already in progress.    */
563
        do {
564
            if ((*stop_func)()) {
×
565
              /* TODO: Notify GC_EVENT_ABANDON */
566
              return FALSE;
×
567
            }
568
            ENTER_GC();
×
569
            GC_collect_a_little_inner(1);
×
570
            EXIT_GC();
×
571
        } while (GC_collection_in_progress());
×
572
    }
573
    GC_notify_full_gc();
12,410✔
574
#   ifndef NO_CLOCK
575
      start_time_valid = FALSE;
12,410✔
576
      if ((GC_print_stats | (int)measure_performance) != 0) {
12,410✔
577
        if (GC_print_stats)
2,031✔
578
          GC_log_printf("Initiating full world-stop collection!\n");
×
579
        start_time_valid = TRUE;
2,031✔
580
        GET_TIME(start_time);
2,031✔
581
      }
582
#   endif
583
    GC_promote_black_lists();
12,410✔
584
    /* Make sure all blocks have been reclaimed, so sweep routines      */
585
    /* don't see cleared mark bits.                                     */
586
    /* If we're guaranteed to finish, then this is unnecessary.         */
587
    /* In the find_leak case, we have to finish to guarantee that       */
588
    /* previously unmarked objects are not reported as leaks.           */
589
#       ifdef PARALLEL_MARK
590
          if (GC_parallel)
12,410✔
591
            GC_wait_for_reclaim();
4,038✔
592
#       endif
593
        if ((GC_find_leak || stop_func != GC_never_stop_func)
12,410✔
594
            && !GC_reclaim_all(stop_func, FALSE)) {
66✔
595
            /* Aborted.  So far everything is still consistent. */
596
            /* TODO: Notify GC_EVENT_ABANDON */
597
            return FALSE;
×
598
        }
599
    GC_invalidate_mark_state();  /* Flush mark stack.   */
12,410✔
600
    GC_clear_marks();
12,410✔
601
#   ifdef SAVE_CALL_CHAIN
602
        GC_save_callers(GC_last_stack);
603
#   endif
604
    GC_is_full_gc = TRUE;
12,410✔
605
    if (!GC_stopped_mark(stop_func)) {
12,410✔
606
      if (!GC_incremental) {
×
607
        /* We're partially done and have no way to complete or use      */
608
        /* current work.  Reestablish invariants as cheaply as          */
609
        /* possible.                                                    */
610
        GC_invalidate_mark_state();
×
611
        GC_unpromote_black_lists();
×
612
      } /* else we claim the world is still consistent.  We'll          */
613
        /* finish incrementally.                                        */
614
      /* TODO: Notify GC_EVENT_ABANDON */
615
      return FALSE;
×
616
    }
617
    GC_finish_collection();
12,410✔
618
#   ifndef NO_CLOCK
619
      if (start_time_valid) {
12,410✔
620
        CLOCK_TYPE current_time;
621
        unsigned long time_diff, ns_frac_diff;
622

623
        GET_TIME(current_time);
2,031✔
624
        time_diff = MS_TIME_DIFF(current_time, start_time);
2,031✔
625
        ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
2,031✔
626
        if (measure_performance) {
2,031✔
627
          full_gc_total_time += time_diff; /* may wrap */
2,031✔
628
          full_gc_total_ns_frac += (unsigned)ns_frac_diff;
2,031✔
629
          if (full_gc_total_ns_frac >= 1000000U) {
2,031✔
630
            /* Overflow of the nanoseconds part. */
631
            full_gc_total_ns_frac -= 1000000U;
1,000✔
632
            full_gc_total_time++;
1,000✔
633
          }
634
        }
635
        if (GC_print_stats)
2,031✔
636
          GC_log_printf("Complete collection took %lu ms %lu ns\n",
×
637
                        time_diff, ns_frac_diff);
638
      }
639
#   endif
640
    if (GC_on_collection_event)
12,410✔
641
      GC_on_collection_event(GC_EVENT_END);
×
642
    return TRUE;
12,410✔
643
}
644

645
/* The number of extra calls to GC_mark_some that we have made. */
646
STATIC int GC_deficit = 0;
647

648
/* The default value of GC_rate.        */
649
#ifndef GC_RATE
650
# define GC_RATE 10
651
#endif
652

653
/* When GC_collect_a_little_inner() performs n units of GC work, a unit */
654
/* is intended to touch roughly GC_rate pages.  (But, every once in     */
655
/* a while, we do more than that.)  This needs to be a fairly large     */
656
/* number with our current incremental GC strategy, since otherwise we  */
657
/* allocate too much during GC, and the cleanup gets expensive.         */
658
STATIC int GC_rate = GC_RATE;
659

660
GC_API void GC_CALL GC_set_rate(int value)
2✔
661
{
662
    GC_ASSERT(value > 0);
2✔
663
    GC_rate = value;
2✔
664
}
2✔
665

666
GC_API int GC_CALL GC_get_rate(void)
2✔
667
{
668
    return GC_rate;
2✔
669
}
670

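A hedged sketch of how a client might drive these units of work from an
idle handler (on_idle is hypothetical; GC_collect_a_little() is the
public entry point defined later in this file):

/* Perform one unit of GC work per idle tick; a nonzero return means   */
/* a collection is still in progress and we should be called again.    */
void on_idle(void)
{
  if (GC_collect_a_little()) {
    /* reschedule this handler */
  }
}
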
671
/* The default maximum number of prior attempts at world stop marking.  */
672
#ifndef MAX_PRIOR_ATTEMPTS
673
# define MAX_PRIOR_ATTEMPTS 3
674
#endif
675

676
/* The maximum number of prior attempts at world stop marking.          */
677
/* A value of 1 means that we finish the second time, no matter how     */
678
/* long it takes.  Does not count the initial root scan for a full GC.  */
679
static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
680

681
GC_API void GC_CALL GC_set_max_prior_attempts(int value)
2✔
682
{
683
    GC_ASSERT(value >= 0);
2✔
684
    max_prior_attempts = value;
2✔
685
}
2✔
686

687
GC_API int GC_CALL GC_get_max_prior_attempts(void)
2✔
688
{
689
    return max_prior_attempts;
2✔
690
}
691

692
GC_INNER void GC_collect_a_little_inner(int n)
7,872,787✔
693
{
694
    IF_CANCEL(int cancel_state;)
695

696
    GC_ASSERT(I_HOLD_LOCK());
7,872,787✔
697
    GC_ASSERT(GC_is_initialized);
7,872,787✔
698
    DISABLE_CANCEL(cancel_state);
7,872,787✔
699
    if (GC_incremental && GC_collection_in_progress()) {
7,872,787✔
700
        int i;
701
        int max_deficit = GC_rate * n;
×
702

703
#       ifdef PARALLEL_MARK
704
            if (GC_time_limit != GC_TIME_UNLIMITED)
×
705
                GC_parallel_mark_disabled = TRUE;
×
706
#       endif
707
        for (i = GC_deficit; i < max_deficit; i++) {
×
708
            if (GC_mark_some(NULL))
×
709
                break;
×
710
        }
711
#       ifdef PARALLEL_MARK
712
            GC_parallel_mark_disabled = FALSE;
×
713
#       endif
714

715
        if (i < max_deficit && !GC_dont_gc) {
×
716
            GC_ASSERT(!GC_collection_in_progress());
×
717
            /* Need to follow up with a full collection.        */
718
#           ifdef SAVE_CALL_CHAIN
719
                GC_save_callers(GC_last_stack);
720
#           endif
721
#           ifdef PARALLEL_MARK
722
                if (GC_parallel)
×
723
                    GC_wait_for_reclaim();
×
724
#           endif
725
#           ifndef NO_CLOCK
726
                if (GC_time_limit != GC_TIME_UNLIMITED
×
727
                        && GC_n_attempts < max_prior_attempts)
×
728
                    GET_TIME(GC_start_time);
×
729
#           endif
730
            if (GC_stopped_mark(GC_n_attempts < max_prior_attempts ?
×
731
                                GC_timeout_stop_func : GC_never_stop_func)) {
732
                GC_finish_collection();
×
733
            } else {
734
                GC_n_attempts++;
×
735
            }
736
        }
737
        if (GC_deficit > 0) {
×
738
            GC_deficit -= max_deficit;
×
739
            if (GC_deficit < 0)
×
740
                GC_deficit = 0;
×
741
        }
742
    } else if (!GC_dont_gc) {
7,872,787✔
743
        GC_maybe_gc();
6,883,644✔
744
    }
745
    RESTORE_CANCEL(cancel_state);
7,872,789✔
746
}
7,872,789✔
747

748
GC_INNER void (*GC_check_heap)(void) = 0;
749
GC_INNER void (*GC_print_all_smashed)(void) = 0;
750

751
GC_API int GC_CALL GC_collect_a_little(void)
4,314,846✔
752
{
753
    int result;
754

755
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
4,314,846✔
756
    LOCK();
4,314,818✔
757
    ENTER_GC();
4,314,871✔
758
    /* Note: if the collection is in progress, this may do marking (not */
759
    /* stopping the world) even in case of disabled GC.                 */
760
    GC_collect_a_little_inner(1);
4,314,871✔
761
    EXIT_GC();
4,314,871✔
762
    result = (int)GC_collection_in_progress();
4,314,871✔
763
    UNLOCK();
4,314,871✔
764
    if (!result && GC_debugging_started) GC_print_all_smashed();
4,314,842✔
765
    return result;
4,314,842✔
766
}
767

768
#ifndef NO_CLOCK
769
  /* Variables for world-stop average delay time statistic computation. */
770
  /* "divisor" is incremented every world-stop and halved when reached  */
771
  /* its maximum (or upon "total_time" overflow).                       */
772
  static unsigned world_stopped_total_time = 0;
773
  static unsigned world_stopped_total_divisor = 0;
774
# ifndef MAX_TOTAL_TIME_DIVISOR
775
    /* We shall not use big values here (so "outdated" delay time       */
776
    /* values would have less impact on "average" delay time value than */
777
    /* newer ones).                                                     */
778
#   define MAX_TOTAL_TIME_DIVISOR 1000
779
# endif
780
#endif /* !NO_CLOCK */
781

782
#ifdef USE_MUNMAP
783
# ifndef MUNMAP_THRESHOLD
784
#   define MUNMAP_THRESHOLD 7
785
# endif
786
  GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;
787

788
# define IF_USE_MUNMAP(x) x
789
# define COMMA_IF_USE_MUNMAP(x) /* comma */, x
790
#else
791
# define IF_USE_MUNMAP(x) /* empty */
792
# define COMMA_IF_USE_MUNMAP(x) /* empty */
793
#endif
794

795
/* We stop the world and mark from all roots.  If stop_func() ever      */
796
/* returns TRUE, we may fail and return FALSE.  Increment GC_gc_no if   */
797
/* we succeed.                                                          */
798
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
29,390✔
799
{
800
    int abandoned_at;
801
    ptr_t cold_gc_frame = GC_approx_sp();
29,390✔
802
#   ifndef NO_CLOCK
803
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
29,390✔
804
      GC_bool start_time_valid = FALSE;
29,390✔
805
#   endif
806

807
    GC_ASSERT(I_HOLD_LOCK());
29,390✔
808
    GC_ASSERT(GC_is_initialized);
29,390✔
809
#   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
810
        GC_add_current_malloc_heap();
811
#   endif
812
#   if defined(REGISTER_LIBRARIES_EARLY)
813
        GC_cond_register_dynamic_libraries();
29,390✔
814
#   endif
815

816
#   if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
817
      GC_process_togglerefs();
29,390✔
818
#   endif
819

820
        /* Output blank line for convenience here.      */
821
    GC_COND_LOG_PRINTF(
29,390✔
822
              "\n--> Marking for collection #%lu after %lu allocated bytes\n",
823
              (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
824
#   ifndef NO_CLOCK
825
      if (GC_PRINT_STATS_FLAG || measure_performance) {
29,390✔
826
        GET_TIME(start_time);
10,814✔
827
        start_time_valid = TRUE;
10,814✔
828
      }
829
#   endif
830
#   ifdef THREADS
831
      if (GC_on_collection_event)
29,390✔
832
        GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
×
833
#   endif
834
    STOP_WORLD();
29,390✔
835
#   ifdef THREADS
836
      if (GC_on_collection_event)
29,390✔
837
        GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
×
838
#   endif
839
#   ifdef THREAD_LOCAL_ALLOC
840
      GC_world_stopped = TRUE;
29,390✔
841
#   endif
842

843
#   ifdef MAKE_BACK_GRAPH
844
      if (GC_print_back_height) {
845
        GC_build_back_graph();
846
      }
847
#   endif
848

849
    /* Notify about marking from all roots.     */
850
        if (GC_on_collection_event)
29,390✔
851
          GC_on_collection_event(GC_EVENT_MARK_START);
×
852

853
    /* Minimize junk left in my registers and on the stack.     */
854
            GC_clear_a_few_frames();
29,390✔
855
            GC_noop6(0,0,0,0,0,0);
29,390✔
856

857
        GC_initiate_gc();
29,390✔
858
#       ifdef PARALLEL_MARK
859
          if (stop_func != GC_never_stop_func)
29,390✔
860
            GC_parallel_mark_disabled = TRUE;
16,980✔
861
#       endif
862
        for (abandoned_at = 0; !(*stop_func)(); abandoned_at++) {
7,495,139✔
863
          if (GC_mark_some(cold_gc_frame)) {
7,495,139✔
864
#           ifdef PARALLEL_MARK
865
              if (GC_parallel && GC_parallel_mark_disabled) {
29,390✔
866
                GC_COND_LOG_PRINTF("Stopped marking done after %d iterations"
8,923✔
867
                                   " with disabled parallel marker\n",
868
                                   abandoned_at);
869
              }
870
#           endif
871
            abandoned_at = -1;
29,390✔
872
            break;
29,390✔
873
          }
874
        }
875
#       ifdef PARALLEL_MARK
876
          GC_parallel_mark_disabled = FALSE;
29,390✔
877
#       endif
878

879
    if (abandoned_at >= 0) {
29,390✔
880
      GC_deficit = abandoned_at; /* Give the mutator a chance. */
×
881
      /* TODO: Notify GC_EVENT_MARK_ABANDON */
882
    } else {
883
      GC_gc_no++;
29,390✔
884
      /* Check all debugged objects for consistency.    */
885
      if (GC_debugging_started) {
29,390✔
886
        (*GC_check_heap)();
247✔
887
      }
888
      if (GC_on_collection_event)
29,390✔
889
        GC_on_collection_event(GC_EVENT_MARK_END);
×
890
    }
891

892
#   ifdef THREADS
893
      if (GC_on_collection_event)
29,390✔
894
        GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
895
#   endif
896
#   ifdef THREAD_LOCAL_ALLOC
897
      GC_world_stopped = FALSE;
29,390✔
898
#   endif
899
    START_WORLD();
29,390✔
900
#   ifdef THREADS
901
      if (GC_on_collection_event)
29,392✔
902
        GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
903
#   endif
904

905
#   ifndef NO_CLOCK
906
      if (start_time_valid) {
29,392✔
907
        CLOCK_TYPE current_time;
908
        unsigned long time_diff, ns_frac_diff;
909

910
        /* TODO: Avoid code duplication from GC_try_to_collect_inner */
911
        GET_TIME(current_time);
10,816✔
912
        time_diff = MS_TIME_DIFF(current_time, start_time);
10,816✔
913
        ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
10,816✔
914
        if (measure_performance) {
10,816✔
915
          stopped_mark_total_time += time_diff; /* may wrap */
10,816✔
916
          stopped_mark_total_ns_frac += (unsigned)ns_frac_diff;
10,816✔
917
          if (stopped_mark_total_ns_frac >= 1000000U) {
10,816✔
918
            stopped_mark_total_ns_frac -= 1000000U;
5,852✔
919
            stopped_mark_total_time++;
5,852✔
920
          }
921
        }
922

923
        if (GC_PRINT_STATS_FLAG) {
10,816✔
924
          unsigned total_time = world_stopped_total_time;
×
925
          unsigned divisor = world_stopped_total_divisor;
×
926

927
          /* Compute new world-stop delay total time.   */
928
          if (total_time > (((unsigned)-1) >> 1)
×
929
              || divisor >= MAX_TOTAL_TIME_DIVISOR) {
×
930
            /* Halve values if overflow occurs. */
931
            total_time >>= 1;
×
932
            divisor >>= 1;
×
933
          }
934
          total_time += time_diff < (((unsigned)-1) >> 1) ?
×
935
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
×
936
          /* Update old world_stopped_total_time and its divisor.   */
937
          world_stopped_total_time = total_time;
×
938
          world_stopped_total_divisor = ++divisor;
×
939
          if (abandoned_at < 0) {
×
940
            GC_ASSERT(divisor != 0);
×
941
            GC_log_printf("World-stopped marking took %lu ms %lu ns"
×
942
                          " (%u ms in average)\n", time_diff, ns_frac_diff,
943
                          total_time / divisor);
944
          }
945
        }
946
      }
947
#   endif
948

949
    if (abandoned_at >= 0) {
29,392✔
950
      GC_COND_LOG_PRINTF("Abandoned stopped marking after %d iterations\n",
×
951
                         abandoned_at);
952
      return FALSE;
29,392✔
953
    }
954
    return TRUE;
29,392✔
955
}
956

957
/* Set all mark bits for the free list whose first entry is q   */
958
GC_INNER void GC_set_fl_marks(ptr_t q)
1,262,629✔
959
{
960
    if (q /* != NULL */) { /* CPPCHECK */
1,262,629✔
961
      struct hblk *h = HBLKPTR(q);
1,262,629✔
962
      struct hblk *last_h = h;
1,262,629✔
963
      hdr *hhdr = HDR(h);
1,262,629✔
964
      IF_PER_OBJ(word sz = hhdr->hb_sz;)
965

966
      for (;;) {
967
        word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
36,352,494✔
968

969
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
36,352,494✔
970
          set_mark_bit_from_hdr(hhdr, bit_no);
16,673,561✔
971
          ++hhdr -> hb_n_marks;
16,673,561✔
972
        }
973

974
        q = (ptr_t)obj_link(q);
36,352,494✔
975
        if (q == NULL)
36,352,494✔
976
          break;
1,262,629✔
977

978
        h = HBLKPTR(q);
35,089,865✔
979
        if (h != last_h) {
35,089,865✔
980
          last_h = h;
56,572✔
981
          hhdr = HDR(h);
56,572✔
982
          IF_PER_OBJ(sz = hhdr->hb_sz;)
983
        }
984
      }
35,089,865✔
985
    }
986
}
1,262,629✔
987

988
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
989
  /* Check that all mark bits for the free list whose first entry is    */
990
  /* (*pfreelist) are set.  Skipped if it points to a special value.   */
991
  void GC_check_fl_marks(void **pfreelist)
20,743,873✔
992
  {
993
    /* TODO: There is a data race with GC_FAST_MALLOC_GRANS (which does */
994
    /* not do atomic updates to the free-list).  The race seems to be   */
995
    /* harmless, and for now we just skip this check in case of TSan.   */
996
#   if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
997
      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
20,743,873✔
998
                /* Atomic operations are used because the world is running. */
999
      AO_t *prev;
1000
      AO_t *p;
1001

1002
      if ((word)list <= HBLKSIZE) return;
20,743,873✔
1003

1004
      prev = (AO_t *)pfreelist;
1,262,155✔
1005
      for (p = list; p != NULL;) {
38,861,213✔
1006
        AO_t *next;
1007

1008
        if (!GC_is_marked(p)) {
36,336,905✔
1009
          ABORT_ARG2("Unmarked local free list entry",
×
1010
                     ": object %p on list %p", (void *)p, (void *)list);
1011
        }
1012

1013
        /* While traversing the free-list, it re-reads the pointer to   */
1014
        /* the current node before accepting its next pointer and       */
1015
        /* bails out if the latter has changed.  That way, it won't     */
1016
        /* try to follow a pointer which might have been modified       */
1017
        /* after the object was returned to the client.  It might       */
1018
        /* perform the mark-check on the just allocated object but      */
1019
        /* that should be harmless.                                     */
1020
        next = (AO_t *)AO_load_acquire_read(p);
36,336,904✔
1021
        if (AO_load(prev) != (AO_t)p)
36,336,904✔
1022
          break;
1✔
1023
        prev = p;
36,336,903✔
1024
        p = next;
36,336,903✔
1025
      }
1026
#   else
1027
      /* FIXME: Not implemented (just skipped). */
1028
      (void)pfreelist;
1029
#   endif
1030
  }
1031
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1032

1033
/* Clear all mark bits for the free list whose first entry is q */
1034
/* Decrement GC_bytes_found by number of bytes on free list.    */
1035
STATIC void GC_clear_fl_marks(ptr_t q)
49,609✔
1036
{
1037
      struct hblk *h = HBLKPTR(q);
49,609✔
1038
      struct hblk *last_h = h;
49,609✔
1039
      hdr *hhdr = HDR(h);
49,609✔
1040
      word sz = hhdr->hb_sz; /* Normally set only once. */
49,609✔
1041

1042
      for (;;) {
1043
        word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
4,914,472✔
1044

1045
        if (mark_bit_from_hdr(hhdr, bit_no)) {
4,914,472✔
1046
          size_t n_marks = hhdr -> hb_n_marks;
1,225,160✔
1047

1048
          GC_ASSERT(n_marks != 0);
1,225,160✔
1049
          clear_mark_bit_from_hdr(hhdr, bit_no);
1,225,160✔
1050
          n_marks--;
1,225,160✔
1051
#         ifdef PARALLEL_MARK
1052
            /* Appr. count, don't decrement to zero! */
1053
            if (0 != n_marks || !GC_parallel) {
1,225,160✔
1054
              hhdr -> hb_n_marks = n_marks;
1,224,602✔
1055
            }
1056
#         else
1057
            hhdr -> hb_n_marks = n_marks;
1058
#         endif
1059
        }
1060
        GC_bytes_found -= sz;
4,914,472✔
1061

1062
        q = (ptr_t)obj_link(q);
4,914,472✔
1063
        if (q == NULL)
4,914,472✔
1064
          break;
49,609✔
1065

1066
        h = HBLKPTR(q);
4,864,863✔
1067
        if (h != last_h) {
4,864,863✔
1068
          last_h = h;
485,532✔
1069
          hhdr = HDR(h);
485,532✔
1070
          sz = hhdr->hb_sz;
485,532✔
1071
        }
1072
      }
4,864,863✔
1073
}
49,609✔
1074

1075
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1076
  void GC_check_tls(void);
1077
#endif
1078

1079
GC_on_heap_resize_proc GC_on_heap_resize = 0;
1080

1081
/* Used for logging only. */
1082
GC_INLINE int GC_compute_heap_usage_percent(void)
×
1083
{
1084
  word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
×
1085
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
×
1086
# if defined(CPPCHECK)
1087
    word limit = (GC_WORD_MAX >> 1) / 50; /* to avoid a false positive */
1088
# else
1089
    const word limit = GC_WORD_MAX / 100;
×
1090
# endif
1091

1092
  return used >= heap_sz ? 0 : used < limit ?
×
1093
                (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
×
1094
}
1095

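A worked example of the overflow-avoiding percentage computation above,
under assumed figures: used = 300 MiB and heap_sz = 1024 MiB fall into
the first branch, giving (300 * 100) / 1024 = 29%.  The second branch,
used / (heap_sz / 100), sacrifices a little precision but is taken only
when used * 100 could overflow a word.
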
1096
#define GC_DBGLOG_PRINT_HEAP_IN_USE() \
1097
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1098
                   GC_compute_heap_usage_percent(), \
1099
                   TO_KiB_UL(GC_composite_in_use), \
1100
                   TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1101

1102
/* Finish up a collection.  Assumes mark bits are consistent, but the   */
1103
/* world is otherwise running.                                          */
1104
STATIC void GC_finish_collection(void)
29,392✔
1105
{
1106
#   ifndef NO_CLOCK
1107
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
29,392✔
1108
      CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
29,392✔
1109
#   endif
1110

1111
    GC_ASSERT(I_HOLD_LOCK());
29,392✔
1112
#   if defined(GC_ASSERTIONS) \
1113
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
1114
        /* Check that we marked some of our own data.           */
1115
        /* TODO: Add more checks. */
1116
        GC_check_tls();
29,392✔
1117
#   endif
1118

1119
#   ifndef NO_CLOCK
1120
      if (GC_print_stats)
29,391✔
1121
        GET_TIME(start_time);
×
1122
#   endif
1123
    if (GC_on_collection_event)
29,391✔
1124
      GC_on_collection_event(GC_EVENT_RECLAIM_START);
×
1125

1126
#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1127
      if (GC_bytes_found > 0)
29,391✔
1128
        GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
29,074✔
1129
#   endif
1130
    GC_bytes_found = 0;
29,391✔
1131
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1132
        if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
29,391✔
1133
          GC_print_address_map();
×
1134
        }
1135
#   endif
1136
    COND_DUMP;
29,391✔
1137
    if (GC_find_leak) {
29,391✔
1138
      /* Mark all objects on the free list.  All objects should be      */
1139
      /* marked when we're done.                                        */
1140
      word size;        /* current object size  */
1141
      unsigned kind;
1142
      ptr_t q;
1143

1144
      for (kind = 0; kind < GC_n_kinds; kind++) {
330✔
1145
        for (size = 1; size <= MAXOBJGRANULES; size++) {
34,056✔
1146
          q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
33,792✔
1147
          if (q != NULL)
33,792✔
1148
            GC_set_fl_marks(q);
538✔
1149
        }
1150
      }
1151
      GC_start_reclaim(TRUE);
66✔
1152
        /* The above just checks; it doesn't really reclaim anything.   */
1153
    }
1154

1155
#   ifndef GC_NO_FINALIZATION
1156
      GC_finalize();
29,391✔
1157
#   endif
1158
#   ifndef NO_CLOCK
1159
      if (GC_print_stats)
29,391✔
1160
        GET_TIME(finalize_time);
×
1161
#   endif
1162

1163
    if (GC_print_back_height) {
29,391✔
1164
#     ifdef MAKE_BACK_GRAPH
1165
        GC_traverse_back_graph();
1166
#     elif !defined(SMALL_CONFIG)
1167
        GC_err_printf("Back height not available: "
×
1168
                      "Rebuild collector with -DMAKE_BACK_GRAPH\n");
1169
#     endif
1170
    }
1171

1172
    /* Clear free list mark bits, in case they got accidentally marked   */
1173
    /* (or GC_find_leak is set and they were intentionally marked).      */
1174
    /* Also subtract memory remaining from GC_bytes_found count.         */
1175
    /* Note that composite objects on free list are cleared.             */
1176
    /* Thus accidentally marking a free list is not a problem;  only     */
1177
    /* objects on the list itself will be marked, and that's fixed here. */
1178
    {
1179
      word size;        /* current object size          */
1180
      ptr_t q;          /* pointer to current object    */
1181
      unsigned kind;
1182

1183
      for (kind = 0; kind < GC_n_kinds; kind++) {
162,869✔
1184
        for (size = 1; size <= MAXOBJGRANULES; size++) {
17,218,662✔
1185
          q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
17,085,184✔
1186
          if (q != NULL)
17,085,184✔
1187
            GC_clear_fl_marks(q);
49,609✔
1188
        }
1189
      }
1190
    }
1191

1192
    GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
29,391✔
1193
                          (long)GC_bytes_found);
1194

1195
    /* Reconstruct free lists to contain everything not marked */
1196
    GC_start_reclaim(FALSE);
29,391✔
1197

1198
#   ifdef USE_MUNMAP
1199
      if (GC_unmap_threshold > 0 /* unmapping enabled? */
29,390✔
1200
          && EXPECT(GC_gc_no != 1, TRUE)) /* do not unmap during GC init */
29,390✔
1201
        GC_unmap_old(GC_unmap_threshold);
29,358✔
1202

1203
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
29,392✔
1204
#   endif
1205
    GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
29,392✔
1206
    GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB ("
29,392✔
1207
                     IF_USE_MUNMAP("+ %lu KiB unmapped ")
1208
                     "+ %lu KiB internal)\n",
1209
                     (unsigned long)GC_gc_no, (long)GC_bytes_found,
1210
                     TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
×
1211
                     COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
×
1212
                     TO_KiB_UL(GC_our_mem_bytes - GC_heapsize
×
1213
                               + sizeof(GC_arrays)));
1214
    GC_DBGLOG_PRINT_HEAP_IN_USE();
29,392✔
1215
    if (GC_is_full_gc) {
29,392✔
1216
        GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
13,825✔
1217
        GC_need_full_gc = FALSE;
13,825✔
1218
    } else {
1219
        GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
31,134✔
1220
                          > min_bytes_allocd() + GC_large_free_bytes;
15,567✔
1221
    }
1222

1223
    /* Reset or increment counters for next cycle */
1224
    GC_n_attempts = 0;
29,392✔
1225
    GC_is_full_gc = FALSE;
29,392✔
1226
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
29,392✔
1227
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
29,392✔
1228
    GC_bytes_allocd = 0;
29,392✔
1229
    GC_bytes_dropped = 0;
29,392✔
1230
    GC_bytes_freed = 0;
29,392✔
1231
    GC_finalizer_bytes_freed = 0;
29,392✔
1232

1233
    if (GC_on_collection_event)
29,392✔
1234
      GC_on_collection_event(GC_EVENT_RECLAIM_END);
×
1235
#   ifndef NO_CLOCK
1236
      if (GC_print_stats) {
29,392✔
1237
        CLOCK_TYPE done_time;
1238

1239
        GET_TIME(done_time);
×
1240
#       if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1241
          /* A convenient place to output finalization statistics.      */
1242
          GC_print_finalization_stats();
×
1243
#       endif
1244
        GC_log_printf("Finalize and initiate sweep took %lu ms %lu ns"
×
1245
                      " + %lu ms %lu ns\n",
1246
                      MS_TIME_DIFF(finalize_time, start_time),
×
1247
                      NS_FRAC_TIME_DIFF(finalize_time, start_time),
×
1248
                      MS_TIME_DIFF(done_time, finalize_time),
×
1249
                      NS_FRAC_TIME_DIFF(done_time, finalize_time));
×
1250
      }
1251
#   elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1252
      if (GC_print_stats)
1253
        GC_print_finalization_stats();
1254
#   endif
1255
}
29,392✔
1256

1257
STATIC word GC_heapsize_at_forced_unmap = 0;
1258
                                /* accessed with the allocation lock held */
1259

1260
/* If stop_func == 0 then GC_default_stop_func is used instead.         */
1261
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
2,461✔
1262
                                         GC_bool force_unmap)
1263
{
1264
    GC_bool result;
1265
    IF_USE_MUNMAP(int old_unmap_threshold;)
1266
    IF_CANCEL(int cancel_state;)
1267

1268
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
2,461✔
1269
    if (GC_debugging_started) GC_print_all_smashed();
2,461✔
1270
    GC_INVOKE_FINALIZERS();
2,461✔
1271
    LOCK();
2,461✔
1272
    if (force_unmap) {
2,461✔
1273
      /* Record current heap size to make heap growth more conservative */
1274
      /* afterwards (as if the heap is growing from zero size again).   */
1275
      GC_heapsize_at_forced_unmap = GC_heapsize;
70✔
1276
    }
1277
    DISABLE_CANCEL(cancel_state);
2,461✔
1278
#   ifdef USE_MUNMAP
1279
      old_unmap_threshold = GC_unmap_threshold;
2,461✔
1280
      if (force_unmap ||
4,852✔
1281
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
2,391✔
1282
        GC_unmap_threshold = 1; /* unmap as much as possible */
70✔
1283
#   endif
1284
    ENTER_GC();
2,461✔
1285
    /* Minimize junk left in my registers */
1286
      GC_noop6(0,0,0,0,0,0);
2,461✔
1287
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
2,461✔
1288
                                     GC_default_stop_func);
1289
    EXIT_GC();
2,461✔
1290
    IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
2,461✔
1291
    RESTORE_CANCEL(cancel_state);
2,461✔
1292
    UNLOCK();
2,461✔
1293
    if (result) {
2,460✔
1294
        if (GC_debugging_started) GC_print_all_smashed();
2,182✔
1295
        GC_INVOKE_FINALIZERS();
2,182✔
1296
    }
1297
    return result;
2,460✔
1298
}
1299

1300
/* Externally callable routines to invoke full, stop-the-world collection. */
1301

1302
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
×
1303
{
1304
    GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
×
1305
    return (int)GC_try_to_collect_general(stop_func, FALSE);
×
1306
}
1307

1308
GC_API void GC_CALL GC_gcollect(void)
2,391✔
1309
{
1310
    /* 0 is passed as stop_func to get GC_default_stop_func value       */
1311
    /* while holding the allocation lock (to prevent data races).       */
1312
    (void)GC_try_to_collect_general(0, FALSE);
2,391✔
1313
    if (get_have_errors())
2,390✔
1314
      GC_print_all_errors();
55✔
1315
}
2,390✔
1316

1317
GC_API void GC_CALL GC_gcollect_and_unmap(void)
70✔
1318
{
1319
    /* Collect and force memory unmapping to OS. */
1320
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
70✔
1321
}
70✔
1322

1323
#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(p != NULL);
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
    GC_our_mem_bytes += bytes;
  }
#endif

/* Use the chunk of memory starting at p of size bytes as part of the heap. */
/* Assumes p is HBLKSIZE aligned, bytes argument is a multiple of HBLKSIZE. */
STATIC void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;
    size_t old_capacity = 0;
    void *old_heap_sects = NULL;
#   ifdef GC_ASSERTIONS
      unsigned i;
#   endif

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)p % HBLKSIZE == 0);
    GC_ASSERT(bytes % HBLKSIZE == 0);
    GC_ASSERT(bytes > 0);
    GC_ASSERT(GC_all_nils != NULL);

    if (EXPECT(GC_n_heap_sects == GC_capacity_heap_sects, FALSE)) {
      /* Allocate new GC_heap_sects with sufficient capacity.   */
#     ifndef INITIAL_HEAP_SECTS
#       define INITIAL_HEAP_SECTS 32
#     endif
      size_t new_capacity = GC_n_heap_sects > 0 ?
                (size_t)GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
      void *new_heap_sects =
                GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));

      if (NULL == new_heap_sects) {
        /* Retry with smaller yet sufficient capacity.  */
        new_capacity = (size_t)GC_n_heap_sects + INITIAL_HEAP_SECTS;
        new_heap_sects =
                GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
        if (NULL == new_heap_sects)
          ABORT("Insufficient memory for heap sections");
      }
      old_capacity = GC_capacity_heap_sects;
      old_heap_sects = GC_heap_sects;
      /* Transfer GC_heap_sects contents to the newly allocated array.  */
      if (GC_n_heap_sects > 0)
        BCOPY(old_heap_sects, new_heap_sects,
              GC_n_heap_sects * sizeof(struct HeapSect));
      GC_capacity_heap_sects = new_capacity;
      GC_heap_sects = (struct HeapSect *)new_heap_sects;
      GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
                         (unsigned long)new_capacity);
    }

    while (EXPECT((word)p <= HBLKSIZE, FALSE)) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (EXPECT(endp <= (word)p, FALSE)) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (EXPECT(NULL == phdr, FALSE)) {
        /* This is extremely unlikely. Can't add it.  This will         */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
#   ifdef GC_ASSERTIONS
      /* Ensure no intersection between sections.       */
      for (i = 0; i < GC_n_heap_sects; i++) {
        word hs_start = (word)GC_heap_sects[i].hs_start;
        word hs_end = hs_start + GC_heap_sects[i].hs_bytes;

        GC_ASSERT(!((hs_start <= (word)p && (word)p < hs_end)
                    || (hs_start < endp && endp <= hs_end)
                    || ((word)p < hs_start && hs_end < endp)));
      }
#   endif
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;

    if ((word)p <= (word)GC_least_plausible_heap_addr
        || EXPECT(NULL == GC_least_plausible_heap_addr, FALSE)) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if (endp > (word)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }
#   ifdef SET_REAL_HEAP_BOUNDS
      if ((word)p < GC_least_real_heap_addr
          || EXPECT(0 == GC_least_real_heap_addr, FALSE))
        GC_least_real_heap_addr = (word)p - sizeof(word);
      if (endp > GC_greatest_real_heap_addr)
        GC_greatest_real_heap_addr = endp;
#   endif
    if (EXPECT(old_capacity > 0, FALSE)) {
#     ifndef GWW_VDB
        /* Recycling may call GC_add_to_heap() again but should not     */
        /* cause resizing of GC_heap_sects.                             */
        GC_scratch_recycle_no_gww(old_heap_sects,
                                  old_capacity * sizeof(struct HeapSect));
#     else
        /* TODO: implement GWW-aware recycling as in alloc_mark_stack */
        GC_noop1((word)old_heap_sects);
#     endif
    }
}

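/*
 * A minimal sketch (not the collector's actual marking code;
 * might_be_heap_ptr is a hypothetical helper) of how the plausible
 * bounds maintained above serve as a cheap first filter for words
 * that might point into the heap:
 *
 *   GC_bool might_be_heap_ptr(word q)
 *   {
 *     return q > (word)GC_least_plausible_heap_addr
 *            && q < (word)GC_greatest_plausible_heap_addr;
 *   }
 *
 * Real candidates are then confirmed against the block headers.
 */
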
#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
              (unsigned long)GC_heapsize /*, */
              COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %d from %p to %p %u/%lu blacklisted\n",
                i, (void *)start, (void *)&start[len],
                nbl, (unsigned long)divHBLKSZ(len));
    }
  }
#endif

void * GC_least_plausible_heap_addr = (void *)GC_WORD_MAX;
void * GC_greatest_plausible_heap_addr = 0;

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}
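
/* Example (a minimal usage sketch; the 256 MiB figure is arbitrary):   */
/*                                                                      */
/*   GC_set_max_heap_size((GC_word)256 << 20);                          */
/*                                                                      */
/* After this call the collector refuses to grow the heap past 256 MiB; */
/* allocations that cannot be satisfied within the limit eventually     */
/* return NULL (see GC_expand_hp_inner and GC_collect_or_expand).       */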

word GC_max_retries = 0;

GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
{
  size_t page_offset;
  size_t displ = 0;
  size_t recycled_bytes;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == ptr) return;

  GC_ASSERT(bytes != 0);
  GC_ASSERT(GC_page_size != 0);
  /* TODO: Assert correct memory flags if GWW_VDB */
  page_offset = (word)ptr & (GC_page_size - 1);
  if (page_offset != 0)
    displ = GC_page_size - page_offset;
  recycled_bytes = bytes > displ ? (bytes - displ) & ~(GC_page_size - 1) : 0;
  GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
                (unsigned long)recycled_bytes, (unsigned long)bytes, ptr);
  if (recycled_bytes > 0)
    GC_add_to_heap((struct hblk *)((word)ptr + displ), recycled_bytes);
}
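
/* A worked example of the rounding above (GC_page_size of 4096 and all */
/* other figures are assumed): for ptr ending in 0x100 and bytes of     */
/* 20000, page_offset is 256, so displ = 4096 - 256 = 3840, and         */
/* recycled_bytes = (20000 - 3840) & ~4095 = 12288, i.e. exactly three  */
/* whole pages starting at the page-aligned address ptr + 3840.         */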

/* This explicitly increases the size of the heap.  It is used          */
/* internally, but may also be invoked from GC_expand_hp by the user.   */
/* The argument is in units of HBLKSIZE (zero is treated as 1).         */
/* Returns FALSE on failure.                                            */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    size_t bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect   */
                                /* the heap to expand soon.             */

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_page_size != 0);
    if (0 == n) n = 1;
    bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
    GC_DBGLOG_PRINT_HEAP_IN_USE();
    if (GC_max_heapsize != 0
        && (GC_max_heapsize < (word)bytes
            || GC_heapsize > GC_max_heapsize - (word)bytes)) {
        /* Exceeded self-imposed limit */
        return FALSE;
    }
    space = GET_MEM(bytes);
    if (EXPECT(NULL == space, FALSE)) {
        WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", bytes >> 10);
        return FALSE;
    }
    GC_add_to_our_memory((ptr_t)space, bytes);
    GC_last_heap_growth_gc_no = GC_gc_no;
    GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                      TO_KiB_UL(GC_heapsize + bytes),
                      (unsigned long)GC_bytes_allocd);

    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0
            && (word)GC_last_heap_addr < (word)space)) {
        /* Assume the heap is growing up. */
        word new_limit = (word)space + (word)bytes + expansion_slop;
        if (new_limit > (word)space
            && (word)GC_greatest_plausible_heap_addr < new_limit)
          GC_greatest_plausible_heap_addr = (void *)new_limit;
    } else {
        /* Heap is growing down. */
        word new_limit = (word)space - expansion_slop - sizeof(word);
        if (new_limit < (word)space
            && (word)GC_least_plausible_heap_addr > new_limit)
          GC_least_plausible_heap_addr = (void *)new_limit;
    }
    GC_last_heap_addr = (ptr_t)space;

    GC_add_to_heap(space, bytes);
    if (GC_on_heap_resize)
        (*GC_on_heap_resize)(GC_heapsize);

    return TRUE;
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    word n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
    word old_heapsize;
    GC_bool result;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    old_heapsize = GC_heapsize;
    result = GC_expand_hp_inner(n_blocks);
    if (result) {
      GC_requested_heapsize += bytes;
      if (GC_dont_gc) {
        /* Do not call WARN if the heap growth is intentional.  */
        GC_ASSERT(GC_heapsize >= old_heapsize);
        GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
      }
    }
    UNLOCK();
    return (int)result;
}
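
/* Example (a minimal usage sketch; the 16 MiB figure is arbitrary):    */
/* pre-growing the heap once at startup avoids a series of smaller      */
/* expansions (and the collections they entail) during an early         */
/* allocation burst.                                                    */
/*                                                                      */
/*   if (!GC_expand_hp(16 << 20))                                       */
/*     fprintf(stderr, "GC heap pre-allocation failed\n");              */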

GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* The threshold ratio of bytes allocated since the latest GC to the    */
/* number of finalizers created since that GC; below this ratio, a      */
/* collection is triggered instead of a heap expansion.  Has no effect  */
/* in the incremental mode.                                             */
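/* For instance, with the default value of 10000 (assumed below): if    */
/* 500 finalizable objects have been registered since the latest        */
/* collection, a collection is preferred over a heap expansion as long  */
/* as fewer than 500 * 10000 = 5000000 bytes have been allocated since  */
/* then; see the finalizer clause in GC_collect_or_expand.              */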
#if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
  STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
#else
  STATIC word GC_allocd_bytes_per_finalizer = 10000;
#endif

GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}

GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}
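
/* Example (a minimal tuning sketch; the factor of 2 is arbitrary):     */
/* halving the ratio makes finalizer-triggered collections rarer,       */
/* trading promptness of finalization for fewer collections:            */
/*                                                                      */
/*   GC_set_allocd_bytes_per_finalizer(                                 */
/*                        GC_get_allocd_bytes_per_finalizer() / 2);     */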

static word last_fo_entries = 0;
static word last_bytes_finalized = 0;

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting the retry value to TRUE unless this is the first  */
/* call in a loop) or until it fails by returning FALSE.  The flags     */
/* argument should be IGNORE_OFF_PAGE or 0.                             */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      unsigned flags,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_is_initialized);
    DISABLE_CANCEL(cancel_state);
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0)
         || (GC_fo_entries > last_fo_entries
             && (last_bytes_finalized | GC_bytes_finalized) != 0
             && (GC_fo_entries - last_fo_entries)
                * GC_allocd_bytes_per_finalizer > GC_bytes_allocd)
         || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        last_fo_entries = GC_fo_entries;
        last_bytes_finalized = GC_bytes_finalized;
        RESTORE_CANCEL(cancel_state);
        return TRUE;
      }
    }

    blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                        / (HBLKSIZE * GC_free_space_divisor)
                    + needed_blocks;
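    /* A worked example of the formula above (all figures assumed):     */
    /* with 64 MiB of heap grown since the latest forced unmap,         */
    /* HBLKSIZE of 4096 and GC_free_space_divisor of 3, the base        */
    /* increment is 67108864 / 12288 = 5461 blocks (roughly 1/3 of the  */
    /* existing heap) on top of the caller's needed_blocks.             */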
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if ((flags & IGNORE_OFF_PAGE) != 0) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
      if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
        blocks_to_get = divHBLKSZ(GC_WORD_MAX);
    } else if (blocks_to_get < MINHINCR) {
      blocks_to_get = MINHINCR;
    }

    if (GC_max_heapsize > GC_heapsize) {
      word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
      if (blocks_to_get > max_get_blocks)
        blocks_to_get = max_get_blocks > needed_blocks
                        ? max_get_blocks : needed_blocks;
    }

#   ifdef USE_MUNMAP
      if (GC_unmap_threshold > 1) {
        /* Return as much memory to the OS as possible before   */
        /* trying to get memory from it.                        */
        GC_unmap_old(0);
      }
#   endif
    if (!GC_expand_hp_inner(blocks_to_get)
        && (blocks_to_get == needed_blocks
            || !GC_expand_hp_inner(needed_blocks))) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning).     */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory!  Trying to continue...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
#         ifdef USE_MUNMAP
            GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#         endif
          WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
               " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return FALSE;
      }
    } else if (GC_fail_count) {
      GC_COND_LOG_PRINTF("Memory available again...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return TRUE;
}

/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &GC_obj_kinds[kind].ok_freelist[gran];
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_is_initialized);
    if (0 == gran) return NULL;

    while (NULL == *flh) {
      ENTER_GC();
#     ifndef GC_DISABLE_INCREMENTAL
        if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED
            && !GC_dont_gc) {
          /* True incremental mode, not just generational.      */
          /* Do our share of marking work.                      */
          GC_collect_a_little_inner(1);
        }
#     endif
      /* Sweep blocks for objects of this size */
        GC_ASSERT(!GC_is_full_gc
                  || NULL == GC_obj_kinds[kind].ok_reclaim_list
                  || NULL == GC_obj_kinds[kind].ok_reclaim_list[gran]);
        GC_continue_reclaim(gran, kind);
      EXIT_GC();
#     if defined(CPPCHECK)
        GC_noop1((word)&flh);
#     endif
      if (NULL == *flh) {
        GC_new_hblk(gran, kind);
#       if defined(CPPCHECK)
          GC_noop1((word)&flh);
#       endif
        if (NULL == *flh) {
          ENTER_GC();
          if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
              && !tried_minor && !GC_dont_gc) {
            GC_collect_a_little_inner(1);
            tried_minor = TRUE;
          } else {
            if (!GC_collect_or_expand(1, 0 /* flags */, retry)) {
              EXIT_GC();
              return NULL;
            }
            retry = TRUE;
          }
          EXIT_GC();
        }
      }
    }
    /* Successful allocation; reset failure count.      */
    GC_fail_count = 0;

    return (ptr_t)(*flh);
}