ivmai / bdwgc / 1487

25 Apr 2023 06:55AM UTC coverage: 76.661% (+0.05%) from 76.615%
Build 1487 · push · travis-ci-com · ivmai

Do some incremental marking in GC_collect_a_little even if GC is disabled

Previous behavior was not documented: when the GC was disabled, this API
function did nothing.  That could cause a deadlock in the client code if
an incremental collection was ongoing and the client called
GC_collect_a_little() repeatedly (while the result is true).

The new behavior, in the case of disabled garbage collection, is to
perform some amount of marking if an incremental collection is ongoing
(but without stopping the world and, thus, without starting the memory
reclaim), returning true if there is something more to mark.

* alloc.c (GC_try_to_collect_inner): Change the loop to a do-while one.
* alloc.c (GC_collect_a_little): Do not check GC_dont_gc (i.e. call
GC_collect_a_little_inner() unconditionally); add comment.
* include/gc/gc.h (GC_collect_a_little): Update comment (describe the
case when GC is disabled).
* tests/gctest.c (check_heap_stats): Do not call GC_is_disabled(); add
comment.
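
To illustrate the client pattern described in the commit message, below is a
minimal sketch (not taken from the repository; do_idle_gc_work is a
hypothetical helper) of a program that repeatedly calls GC_collect_a_little()
while it returns true, assuming a libgc build with incremental collection
available:

#include "gc.h"

/* Cooperative, idle-time GC work: do a little marking at a time until */
/* GC_collect_a_little() reports that no collection is in progress.    */
static void do_idle_gc_work(void)
{
    while (GC_collect_a_little()) {
        /* Under the old behavior, if GC_disable() had been called while */
        /* an incremental collection was in progress, this call did no   */
        /* work yet kept returning a non-zero result, so the loop never  */
        /* terminated.  With the change above, marking still progresses  */
        /* (without stopping the world) and the loop eventually exits.   */
    }
}

int main(void)
{
    GC_INIT();
    GC_enable_incremental();   /* switch to incremental collection mode */
    (void)GC_MALLOC(1000);     /* give the collector some work          */
    do_idle_gc_work();
    return 0;
}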

5 of 5 new or added lines in 2 files covered. (100.0%)

7788 of 10159 relevant lines covered (76.66%)

8863351.24 hits per line

Source File: /alloc.c (77.47% covered)
1
/*
2
 * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
6
 * Copyright (c) 2008-2022 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 *
17
 */
18

19
#include "private/gc_priv.h"
20

21
#if !defined(MACOS) && !defined(MSWINCE)
22
# include <signal.h>
23
# if !defined(GC_NO_TYPES) && !defined(SN_TARGET_PSP2) \
24
     && !defined(__CC_ARM)
25
#   include <sys/types.h>
26
# endif
27
#endif
28

29
/*
30
 * Separate free lists are maintained for different sized objects
31
 * up to MAXOBJBYTES.
32
 * The call GC_allocobj(i,k) ensures that the freelist for
33
 * kind k objects of size i points to a non-empty
34
 * free list. It returns a pointer to the first entry on the free list.
35
 * In a single-threaded world, GC_allocobj may be called to allocate
36
 * an object of small size lb (and NORMAL kind) as follows
37
 * (GC_generic_malloc_inner is a wrapper over GC_allocobj which also
38
 * fills in GC_size_map if needed):
39
 *
40
 *   lg = GC_size_map[lb];
41
 *   op = GC_objfreelist[lg];
42
 *   if (NULL == op) {
43
 *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
44
 *   } else {
45
 *     GC_objfreelist[lg] = obj_link(op);
46
 *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
47
 *   }
48
 *
49
 * Note that this is very fast if the free list is non-empty; it should
50
 * only involve the execution of 4 or 5 simple instructions.
51
 * All composite objects on freelists are cleared, except for
52
 * their first word.
53
 */
54

55
/*
56
 * The allocator uses GC_allochblk to allocate large chunks of objects.
57
 * These chunks all start on addresses which are multiples of
58
 * HBLKSZ.   Each allocated chunk has an associated header,
59
 * which can be located quickly based on the address of the chunk.
60
 * (See headers.c for details.)
61
 * This makes it possible to check quickly whether an
62
 * arbitrary address corresponds to an object administered by the
63
 * allocator.
64
 */
65

66
word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */
67

68
word GC_gc_no = 0;
69

70
#ifndef NO_CLOCK
71
  static unsigned long full_gc_total_time = 0; /* in ms, may wrap */
72
  static unsigned full_gc_total_ns_frac = 0; /* fraction of 1 ms */
73
  static GC_bool measure_performance = FALSE;
74
                /* Do performance measurements if set to true (e.g.,    */
75
                /* accumulation of the total time of full collections). */
76

77
  GC_API void GC_CALL GC_start_performance_measurement(void)
2✔
78
  {
79
    measure_performance = TRUE;
2✔
80
  }
2✔
81

82
  GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void)
2✔
83
  {
84
    return full_gc_total_time;
2✔
85
  }
86
#endif /* !NO_CLOCK */
87

88
#ifndef GC_DISABLE_INCREMENTAL
89
  GC_INNER GC_bool GC_incremental = FALSE; /* By default, stop the world. */
90
  STATIC GC_bool GC_should_start_incremental_collection = FALSE;
91
#endif
92

93
GC_API int GC_CALL GC_is_incremental_mode(void)
16✔
94
{
95
  return (int)GC_incremental;
16✔
96
}
97

98
#ifdef THREADS
99
  int GC_parallel = FALSE;      /* By default, parallel GC is off.      */
100
#endif
101

102
#if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
103
  int GC_full_freq = GC_FULL_FREQ;
104
#else
105
  int GC_full_freq = 19;   /* Every 20th collection is a full   */
106
                           /* collection, whether we need it    */
107
                           /* or not.                           */
108
#endif
109

110
STATIC GC_bool GC_need_full_gc = FALSE;
111
                           /* Need full GC due to heap growth.  */
112

113
#ifdef THREAD_LOCAL_ALLOC
114
  GC_INNER GC_bool GC_world_stopped = FALSE;
115
#endif
116

117
STATIC GC_bool GC_disable_automatic_collection = FALSE;
118

119
GC_API void GC_CALL GC_set_disable_automatic_collection(int value)
2✔
120
{
121
  LOCK();
2✔
122
  GC_disable_automatic_collection = (GC_bool)value;
2✔
123
  UNLOCK();
2✔
124
}
2✔
125

126
GC_API int GC_CALL GC_get_disable_automatic_collection(void)
2✔
127
{
128
  int value;
129

130
  LOCK();
2✔
131
  value = (int)GC_disable_automatic_collection;
2✔
132
  UNLOCK();
2✔
133
  return value;
2✔
134
}
135

136
STATIC word GC_used_heap_size_after_full = 0;
137

138
/* Version macros are now defined in gc_version.h, which is included by */
139
/* gc.h, which is included by gc_priv.h.                                */
140
#ifndef GC_NO_VERSION_VAR
141
  EXTERN_C_BEGIN
142
  extern const unsigned GC_version;
143
  EXTERN_C_END
144
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
145
                        (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
146
#endif
147

148
GC_API unsigned GC_CALL GC_get_version(void)
2✔
149
{
150
  return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
2✔
151
          GC_VERSION_MICRO;
152
}
153

154
/* some more variables */
155

156
#ifdef GC_DONT_EXPAND
157
  int GC_dont_expand = TRUE;
158
#else
159
  int GC_dont_expand = FALSE;
160
#endif
161

162
#if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
163
  word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /* must be > 0 */
164
#else
165
  word GC_free_space_divisor = 3;
166
#endif
167

168
GC_INNER int GC_CALLBACK GC_never_stop_func(void)
7,259,846✔
169
{
170
  return FALSE;
7,259,846✔
171
}
172

173
#if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
174
  unsigned long GC_time_limit = GC_TIME_LIMIT;
175
                           /* We try to keep pause times from exceeding  */
176
                           /* this by much. In milliseconds.             */
177
#elif defined(PARALLEL_MARK)
178
  unsigned long GC_time_limit = GC_TIME_UNLIMITED;
179
                        /* The parallel marker cannot be interrupted for */
180
                        /* now, so the time limit is absent by default.  */
181
#else
182
  unsigned long GC_time_limit = 15;
183
#endif
184

185
#ifndef NO_CLOCK
186
  STATIC unsigned long GC_time_lim_nsec = 0;
187
                        /* The nanoseconds add-on to GC_time_limit      */
188
                        /* value.  Not updated by GC_set_time_limit().  */
189
                        /* Ignored if the value of GC_time_limit is     */
190
                        /* GC_TIME_UNLIMITED.                           */
191

192
# define TV_NSEC_LIMIT (1000UL * 1000) /* amount of nanoseconds in 1 ms */
193

194
  GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s tv)
2✔
195
  {
196
    GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
2✔
197
    GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
2✔
198
    GC_time_limit = tv.tv_ms;
2✔
199
    GC_time_lim_nsec = tv.tv_nsec;
2✔
200
  }
2✔
201

202
  GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void)
2✔
203
  {
204
    struct GC_timeval_s tv;
205

206
    tv.tv_ms = GC_time_limit;
2✔
207
    tv.tv_nsec = GC_time_lim_nsec;
2✔
208
    return tv;
2✔
209
  }
210

211
  STATIC CLOCK_TYPE GC_start_time = CLOCK_TYPE_INITIALIZER;
212
                                /* Time at which we stopped world.      */
213
                                /* used only in GC_timeout_stop_func.   */
214
#endif /* !NO_CLOCK */
215

216
STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
217
                                /* collection within GC_time_limit.     */
218

219
STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
220
                                /* Accessed holding the allocator lock. */
221

222
GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
2✔
223
{
224
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
2✔
225
  LOCK();
2✔
226
  GC_default_stop_func = stop_func;
2✔
227
  UNLOCK();
2✔
228
}
2✔
229

230
GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
2✔
231
{
232
  GC_stop_func stop_func;
233

234
  LOCK();
2✔
235
  stop_func = GC_default_stop_func;
2✔
236
  UNLOCK();
2✔
237
  return stop_func;
2✔
238
}
239

240
#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
241
# define GC_timeout_stop_func GC_default_stop_func
242
#else
243
  STATIC int GC_CALLBACK GC_timeout_stop_func(void)
6,441,163✔
244
  {
245
    CLOCK_TYPE current_time;
246
    static unsigned count = 0;
247
    unsigned long time_diff, nsec_diff;
248

249
    if (GC_default_stop_func())
6,441,163✔
250
      return TRUE;
6,441,163✔
251

252
    if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
6,441,163✔
253
      return FALSE;
6,441,163✔
254

255
    GET_TIME(current_time);
×
256
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
×
257
    nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
×
258
#   if defined(CPPCHECK)
259
      GC_noop1((word)&nsec_diff);
260
#   endif
261
    if (time_diff >= GC_time_limit
×
262
        && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
×
263
      GC_COND_LOG_PRINTF("Abandoning stopped marking after %lu ms %lu ns"
×
264
                         " (attempt %d)\n",
265
                         time_diff, nsec_diff, GC_n_attempts);
266
      return TRUE;
×
267
    }
268

269
    return FALSE;
×
270
  }
271
#endif /* !GC_DISABLE_INCREMENTAL */
272

273
#ifdef THREADS
274
  GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
275
#endif
276

277
static size_t min_bytes_allocd_minimum = 1;
278
                        /* The lowest value returned by min_bytes_allocd(). */
279

280
GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value)
2✔
281
{
282
    GC_ASSERT(value > 0);
2✔
283
    min_bytes_allocd_minimum = value;
2✔
284
}
2✔
285

286
GC_API size_t GC_CALL GC_get_min_bytes_allocd(void)
2✔
287
{
288
    return min_bytes_allocd_minimum;
2✔
289
}
290

291
/* Return the minimum number of bytes that must be allocated between    */
292
/* collections to amortize the collection cost.  Should be non-zero.    */
293
static word min_bytes_allocd(void)
43,713✔
294
{
295
    word result;
296
    word stack_size;
297
    word total_root_size;       /* includes double stack size,  */
298
                                /* since the stack is expensive */
299
                                /* to scan.                     */
300
    word scan_size;             /* Estimate of memory to be scanned     */
301
                                /* during normal GC.                    */
302

303
#   ifdef THREADS
304
      if (GC_need_to_lock) {
43,713✔
305
        /* We are multi-threaded... */
306
        stack_size = GC_total_stacksize;
19,375✔
307
        /* For now, we just use the value computed during the latest GC. */
308
#       ifdef DEBUG_THREADS
309
          GC_log_printf("Total stacks size: %lu\n",
310
                        (unsigned long)stack_size);
311
#       endif
312
      } else
313
#   endif
314
    /* else*/ {
315
#     ifdef STACK_NOT_SCANNED
316
        stack_size = 0;
317
#     elif defined(STACK_GROWS_UP)
318
        stack_size = GC_approx_sp() - GC_stackbottom;
319
#     else
320
        stack_size = GC_stackbottom - GC_approx_sp();
24,338✔
321
#     endif
322
    }
323

324
    total_root_size = 2 * stack_size + GC_root_size;
43,713✔
325
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
43,713✔
326
                + total_root_size;
327
    result = scan_size / GC_free_space_divisor;
43,713✔
328
    if (GC_incremental) {
43,713✔
329
      result /= 2;
35,265✔
330
    }
331
    return result > min_bytes_allocd_minimum
43,713✔
332
            ? result : min_bytes_allocd_minimum;
43,713✔
333
}
334

335
STATIC word GC_non_gc_bytes_at_gc = 0;
336
                /* Number of explicitly managed bytes of storage        */
337
                /* at last collection.                                  */
338

339
/* Return the number of bytes allocated, adjusted for explicit storage  */
340
/* management, etc..  This number is used in deciding when to trigger   */
341
/* collections.                                                         */
342
STATIC word GC_adj_bytes_allocd(void)
10,186,391✔
343
{
344
    signed_word result;
345
    signed_word expl_managed = (signed_word)GC_non_gc_bytes
20,372,782✔
346
                                - (signed_word)GC_non_gc_bytes_at_gc;
10,186,391✔
347

348
    /* Don't count what was explicitly freed, or newly allocated for    */
349
    /* explicit management.  Note that deallocating an explicitly       */
350
    /* managed object should not alter result, assuming the client      */
351
    /* is playing by the rules.                                         */
352
    result = (signed_word)GC_bytes_allocd
20,372,782✔
353
             + (signed_word)GC_bytes_dropped
10,186,391✔
354
             - (signed_word)GC_bytes_freed
10,186,391✔
355
             + (signed_word)GC_finalizer_bytes_freed
10,186,391✔
356
             - expl_managed;
357
    if (result > (signed_word)GC_bytes_allocd) {
10,186,391✔
358
        result = GC_bytes_allocd;
1,989,960✔
359
        /* probably client bug or unfortunate scheduling */
360
    }
361
    result += GC_bytes_finalized;
10,186,391✔
362
        /* We count objects enqueued for finalization as though they    */
363
        /* had been reallocated this round. Finalization is user        */
364
        /* visible progress.  And if we don't count this, we have       */
365
        /* stability problems for programs that finalize all objects.   */
366
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
10,186,391✔
367
        /* Always count at least 1/8 of the allocations.  We don't want */
368
        /* to collect too infrequently, since that would inhibit        */
369
        /* coalescing of free storage blocks.                           */
370
        /* This also makes us partially robust against client bugs.     */
371
        result = (signed_word)(GC_bytes_allocd >> 3);
1,118✔
372
    }
373
    return (word)result;
10,186,391✔
374
}
375

376

377
/* Clear up a few frames worth of garbage left at the top of the stack. */
378
/* This is used to prevent us from accidentally treating garbage left   */
379
/* on the stack by other parts of the collector as roots.  This         */
380
/* differs from the code in misc.c, which actually tries to keep the    */
381
/* stack clear of long-lived, client-generated garbage.                 */
382
STATIC void GC_clear_a_few_frames(void)
27,935✔
383
{
384
#   ifndef CLEAR_NWORDS
385
#     define CLEAR_NWORDS 64
386
#   endif
387
    volatile word frames[CLEAR_NWORDS];
388
    BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
27,935✔
389
}
27,935✔
390

391
GC_API void GC_CALL GC_start_incremental_collection(void)
×
392
{
393
# ifndef GC_DISABLE_INCREMENTAL
394
    if (!GC_incremental) return;
×
395

396
    LOCK();
×
397
    GC_should_start_incremental_collection = TRUE;
×
398
    if (!GC_dont_gc) {
×
399
      ENTER_GC();
×
400
      GC_collect_a_little_inner(1);
×
401
      EXIT_GC();
×
402
    }
403
    UNLOCK();
×
404
# endif
405
}
406

407
/* Have we allocated enough to amortize a collection? */
408
GC_INNER GC_bool GC_should_collect(void)
10,192,442✔
409
{
410
    static word last_min_bytes_allocd;
411
    static word last_gc_no;
412

413
    GC_ASSERT(I_HOLD_LOCK());
10,192,442✔
414
    if (last_gc_no != GC_gc_no) {
10,192,442✔
415
      last_min_bytes_allocd = min_bytes_allocd();
27,455✔
416
      last_gc_no = GC_gc_no;
27,455✔
417
    }
418
# ifndef GC_DISABLE_INCREMENTAL
419
    if (GC_should_start_incremental_collection) {
10,192,442✔
420
      GC_should_start_incremental_collection = FALSE;
×
421
      return TRUE;
×
422
    }
423
# endif
424
    if (GC_disable_automatic_collection) return FALSE;
10,192,442✔
425

426
    if (GC_last_heap_growth_gc_no == GC_gc_no)
10,192,442✔
427
      return TRUE; /* avoid expanding past limits used by blacklisting  */
6,050✔
428

429
    return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
10,186,392✔
430
}
431

432
/* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
433
                        /* Called at start of full collections.         */
434
                        /* Not called if 0.  Called with the allocation */
435
                        /* lock held.  Not used by GC itself.           */
436

437
GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
2✔
438
{
439
    LOCK();
2✔
440
    GC_start_call_back = fn;
2✔
441
    UNLOCK();
2✔
442
}
2✔
443

444
GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
2✔
445
{
446
    GC_start_callback_proc fn;
447

448
    LOCK();
2✔
449
    fn = GC_start_call_back;
2✔
450
    UNLOCK();
2✔
451
    return fn;
2✔
452
}
453

454
GC_INLINE void GC_notify_full_gc(void)
12,111✔
455
{
456
    if (GC_start_call_back != 0) {
12,111✔
457
        (*GC_start_call_back)();
×
458
    }
459
}
12,111✔
460

461
STATIC GC_bool GC_is_full_gc = FALSE;
462

463
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
464
STATIC void GC_finish_collection(void);
465

466
/* Initiate a garbage collection if appropriate.  Choose judiciously    */
467
/* between partial, full, and stop-world collections.                   */
468
STATIC void GC_maybe_gc(void)
8,401,924✔
469
{
470
  static int n_partial_gcs = 0;
471

472
  GC_ASSERT(I_HOLD_LOCK());
8,401,924✔
473
  ASSERT_CANCEL_DISABLED();
8,401,924✔
474
  if (!GC_should_collect()) return;
8,401,924✔
475

476
  if (!GC_incremental) {
17,245✔
477
    GC_gcollect_inner();
×
478
    return;
×
479
  }
480

481
  GC_ASSERT(!GC_collection_in_progress());
17,245✔
482
# ifdef PARALLEL_MARK
483
    if (GC_parallel)
17,245✔
484
      GC_wait_for_reclaim();
9,142✔
485
# endif
486
  if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
17,245✔
487
    GC_COND_LOG_PRINTF(
1,422✔
488
                "***>Full mark for collection #%lu after %lu allocd bytes\n",
489
                (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
×
490
    GC_promote_black_lists();
1,422✔
491
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
1,422✔
492
    GC_notify_full_gc();
1,422✔
493
    GC_clear_marks();
1,422✔
494
    n_partial_gcs = 0;
1,422✔
495
    GC_is_full_gc = TRUE;
1,422✔
496
  } else {
497
    n_partial_gcs++;
15,823✔
498
  }
499

500
  /* Try to mark with the world stopped.  If we run out of      */
501
  /* time, this turns into an incremental marking.              */
502
# ifndef NO_CLOCK
503
    if (GC_time_limit != GC_TIME_UNLIMITED) GET_TIME(GC_start_time);
17,245✔
504
# endif
505
  if (GC_stopped_mark(GC_timeout_stop_func)) {
17,245✔
506
#   ifdef SAVE_CALL_CHAIN
507
      GC_save_callers(GC_last_stack);
508
#   endif
509
    GC_finish_collection();
17,246✔
510
  } else if (!GC_is_full_gc) {
×
511
    /* Count this as the first attempt. */
512
    GC_n_attempts++;
×
513
  }
514
}
515

516
STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
517

518
GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc fn)
2✔
519
{
520
    /* fn may be 0 (means no event notifier). */
521
    LOCK();
2✔
522
    GC_on_collection_event = fn;
2✔
523
    UNLOCK();
2✔
524
}
2✔
525

526
GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void)
2✔
527
{
528
    GC_on_collection_event_proc fn;
529

530
    LOCK();
2✔
531
    fn = GC_on_collection_event;
2✔
532
    UNLOCK();
2✔
533
    return fn;
2✔
534
}
535

536
/* Stop the world garbage collection.  If stop_func is not      */
537
/* GC_never_stop_func then abort if stop_func returns TRUE.     */
538
/* Return TRUE if we successfully completed the collection.     */
539
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
10,852✔
540
{
541
#   ifndef NO_CLOCK
542
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
10,852✔
543
      GC_bool start_time_valid;
544
#   endif
545

546
    ASSERT_CANCEL_DISABLED();
10,852✔
547
    GC_ASSERT(I_HOLD_LOCK());
10,852✔
548
    GC_ASSERT(GC_is_initialized);
10,852✔
549
    if (GC_dont_gc || (*stop_func)()) return FALSE;
21,707✔
550
    if (GC_on_collection_event)
10,689✔
551
      GC_on_collection_event(GC_EVENT_START);
×
552
    if (GC_incremental && GC_collection_in_progress()) {
10,689✔
553
      GC_COND_LOG_PRINTF(
×
554
            "GC_try_to_collect_inner: finishing collection in progress\n");
555
      /* Just finish collection already in progress.    */
556
        do {
557
            if ((*stop_func)()) {
×
558
              /* TODO: Notify GC_EVENT_ABANDON */
559
              return FALSE;
×
560
            }
561
            ENTER_GC();
×
562
            GC_collect_a_little_inner(1);
×
563
            EXIT_GC();
×
564
        } while (GC_collection_in_progress());
×
565
    }
566
    GC_notify_full_gc();
10,689✔
567
#   ifndef NO_CLOCK
568
      start_time_valid = FALSE;
10,689✔
569
      if ((GC_print_stats | (int)measure_performance) != 0) {
10,689✔
570
        if (GC_print_stats)
2,155✔
571
          GC_log_printf("Initiating full world-stop collection!\n");
×
572
        start_time_valid = TRUE;
2,155✔
573
        GET_TIME(start_time);
2,155✔
574
      }
575
#   endif
576
    GC_promote_black_lists();
10,689✔
577
    /* Make sure all blocks have been reclaimed, so sweep routines      */
578
    /* don't see cleared mark bits.                                     */
579
    /* If we're guaranteed to finish, then this is unnecessary.         */
580
    /* In the find_leak case, we have to finish to guarantee that       */
581
    /* previously unmarked objects are not reported as leaks.           */
582
#       ifdef PARALLEL_MARK
583
          if (GC_parallel)
10,689✔
584
            GC_wait_for_reclaim();
3,545✔
585
#       endif
586
        if ((GC_find_leak || stop_func != GC_never_stop_func)
10,690✔
587
            && !GC_reclaim_all(stop_func, FALSE)) {
66✔
588
            /* Aborted.  So far everything is still consistent. */
589
            /* TODO: Notify GC_EVENT_ABANDON */
590
            return FALSE;
×
591
        }
592
    GC_invalidate_mark_state();  /* Flush mark stack.   */
10,690✔
593
    GC_clear_marks();
10,690✔
594
#   ifdef SAVE_CALL_CHAIN
595
        GC_save_callers(GC_last_stack);
596
#   endif
597
    GC_is_full_gc = TRUE;
10,690✔
598
    if (!GC_stopped_mark(stop_func)) {
10,690✔
599
      if (!GC_incremental) {
×
600
        /* We're partially done and have no way to complete or use      */
601
        /* current work.  Reestablish invariants as cheaply as          */
602
        /* possible.                                                    */
603
        GC_invalidate_mark_state();
×
604
        GC_unpromote_black_lists();
×
605
      } /* else we claim the world is already still consistent.  We'll  */
606
        /* finish incrementally.                                        */
607
      /* TODO: Notify GC_EVENT_ABANDON */
608
      return FALSE;
×
609
    }
610
    GC_finish_collection();
10,692✔
611
#   ifndef NO_CLOCK
612
      if (start_time_valid) {
10,692✔
613
        CLOCK_TYPE current_time;
614
        unsigned long time_diff, ns_frac_diff;
615

616
        GET_TIME(current_time);
2,158✔
617
        time_diff = MS_TIME_DIFF(current_time, start_time);
2,158✔
618
        ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
2,158✔
619
        if (measure_performance) {
2,158✔
620
          full_gc_total_time += time_diff; /* may wrap */
2,158✔
621
          full_gc_total_ns_frac += (unsigned)ns_frac_diff;
2,158✔
622
          if (full_gc_total_ns_frac >= 1000000U) {
2,158✔
623
            /* Overflow of the nanoseconds part. */
624
            full_gc_total_ns_frac -= 1000000U;
1,100✔
625
            full_gc_total_time++;
1,100✔
626
          }
627
        }
628
        if (GC_print_stats)
2,158✔
629
          GC_log_printf("Complete collection took %lu ms %lu ns\n",
×
630
                        time_diff, ns_frac_diff);
631
      }
632
#   endif
633
    if (GC_on_collection_event)
10,692✔
634
      GC_on_collection_event(GC_EVENT_END);
×
635
    return TRUE;
10,692✔
636
}
637

638
/* The number of extra calls to GC_mark_some that we have made. */
639
STATIC int GC_deficit = 0;
640

641
/* The default value of GC_rate.        */
642
#ifndef GC_RATE
643
# define GC_RATE 10
644
#endif
645

646
/* When GC_collect_a_little_inner() performs n units of GC work, a unit */
647
/* is intended to touch roughly GC_rate pages.  (But, every once in     */
648
/* a while, we do more than that.)  This needs to be a fairly large     */
649
/* number with our current incremental GC strategy, since otherwise we  */
650
/* allocate too much during GC, and the cleanup gets expensive.         */
651
STATIC int GC_rate = GC_RATE;
652

653
GC_API void GC_CALL GC_set_rate(int value)
2✔
654
{
655
    GC_ASSERT(value > 0);
2✔
656
    GC_rate = value;
2✔
657
}
2✔
658

659
GC_API int GC_CALL GC_get_rate(void)
2✔
660
{
661
    return GC_rate;
2✔
662
}
663

664
/* The default maximum number of prior attempts at world stop marking.  */
665
#ifndef MAX_PRIOR_ATTEMPTS
666
# define MAX_PRIOR_ATTEMPTS 3
667
#endif
668

669
/* The maximum number of prior attempts at world stop marking.          */
670
/* A value of 1 means that we finish the second time, no matter how     */
671
/* long it takes.  Does not count the initial root scan for a full GC.  */
672
static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
673

674
GC_API void GC_CALL GC_set_max_prior_attempts(int value)
2✔
675
{
676
    GC_ASSERT(value >= 0);
2✔
677
    max_prior_attempts = value;
2✔
678
}
2✔
679

680
GC_API int GC_CALL GC_get_max_prior_attempts(void)
2✔
681
{
682
    return max_prior_attempts;
2✔
683
}
684

685
GC_INNER void GC_collect_a_little_inner(int n)
9,034,288✔
686
{
687
    IF_CANCEL(int cancel_state;)
688

689
    GC_ASSERT(I_HOLD_LOCK());
9,034,288✔
690
    GC_ASSERT(GC_is_initialized);
9,034,288✔
691
    DISABLE_CANCEL(cancel_state);
9,034,288✔
692
    if (GC_incremental && GC_collection_in_progress()) {
9,034,289✔
693
        int i;
694
        int max_deficit = GC_rate * n;
×
695

696
#       ifdef PARALLEL_MARK
697
            if (GC_time_limit != GC_TIME_UNLIMITED)
×
698
                GC_parallel_mark_disabled = TRUE;
×
699
#       endif
700
        for (i = GC_deficit; i < max_deficit; i++) {
×
701
            if (GC_mark_some(NULL))
×
702
                break;
×
703
        }
704
#       ifdef PARALLEL_MARK
705
            GC_parallel_mark_disabled = FALSE;
×
706
#       endif
707

708
        if (i < max_deficit && !GC_dont_gc) {
×
709
            GC_ASSERT(!GC_collection_in_progress());
×
710
            /* Need to follow up with a full collection.        */
711
#           ifdef SAVE_CALL_CHAIN
712
                GC_save_callers(GC_last_stack);
713
#           endif
714
#           ifdef PARALLEL_MARK
715
                if (GC_parallel)
×
716
                    GC_wait_for_reclaim();
×
717
#           endif
718
#           ifndef NO_CLOCK
719
                if (GC_time_limit != GC_TIME_UNLIMITED
×
720
                        && GC_n_attempts < max_prior_attempts)
×
721
                    GET_TIME(GC_start_time);
×
722
#           endif
723
            if (GC_stopped_mark(GC_n_attempts < max_prior_attempts ?
×
724
                                GC_timeout_stop_func : GC_never_stop_func)) {
725
                GC_finish_collection();
×
726
            } else {
727
                GC_n_attempts++;
×
728
            }
729
        }
730
        if (GC_deficit > 0) {
×
731
            GC_deficit -= max_deficit;
×
732
            if (GC_deficit < 0)
×
733
                GC_deficit = 0;
×
734
        }
735
    } else if (!GC_dont_gc) {
9,034,289✔
736
        GC_maybe_gc();
8,401,923✔
737
    }
738
    RESTORE_CANCEL(cancel_state);
9,034,295✔
739
}
9,034,295✔
740

741
GC_INNER void (*GC_check_heap)(void) = 0;
742
GC_INNER void (*GC_print_all_smashed)(void) = 0;
743

744
GC_API int GC_CALL GC_collect_a_little(void)
4,763,430✔
745
{
746
    int result;
747

748
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
4,763,430✔
749
    LOCK();
4,763,438✔
750
    ENTER_GC();
4,763,444✔
751
    /* Note: if the collection is in progress, this may do marking (not */
752
    /* stopping the world) even in case of disabled GC.                 */
753
    GC_collect_a_little_inner(1);
4,763,444✔
754
    EXIT_GC();
4,763,444✔
755
    result = (int)GC_collection_in_progress();
4,763,444✔
756
    UNLOCK();
4,763,444✔
757
    if (!result && GC_debugging_started) GC_print_all_smashed();
4,763,419✔
758
    return result;
4,763,419✔
759
}
760

761
#ifndef NO_CLOCK
762
  /* Variables for world-stop average delay time statistic computation. */
763
  /* "divisor" is incremented every world-stop and halved when reached  */
764
  /* its maximum (or upon "total_time" overflow).                       */
765
  static unsigned world_stopped_total_time = 0;
766
  static unsigned world_stopped_total_divisor = 0;
767
# ifndef MAX_TOTAL_TIME_DIVISOR
768
    /* We shall not use big values here (so "outdated" delay time       */
769
    /* values would have less impact on "average" delay time value than */
770
    /* newer ones).                                                     */
771
#   define MAX_TOTAL_TIME_DIVISOR 1000
772
# endif
773
#endif /* !NO_CLOCK */
774

775
#ifdef USE_MUNMAP
776
# ifndef MUNMAP_THRESHOLD
777
#   define MUNMAP_THRESHOLD 7
778
# endif
779
  GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;
780

781
# define IF_USE_MUNMAP(x) x
782
# define COMMA_IF_USE_MUNMAP(x) /* comma */, x
783
#else
784
# define IF_USE_MUNMAP(x) /* empty */
785
# define COMMA_IF_USE_MUNMAP(x) /* empty */
786
#endif
787

788
/* We stop the world and mark from all roots.  If stop_func() ever      */
789
/* returns TRUE, we may fail and return FALSE.  Increment GC_gc_no if   */
790
/* we succeed.                                                          */
791
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
27,935✔
792
{
793
    int i;
794
    ptr_t cold_gc_frame = GC_approx_sp();
27,935✔
795
#   ifndef NO_CLOCK
796
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
27,935✔
797
#   endif
798

799
    GC_ASSERT(I_HOLD_LOCK());
27,935✔
800
    GC_ASSERT(GC_is_initialized);
27,935✔
801
#   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
802
        GC_add_current_malloc_heap();
803
#   endif
804
#   if defined(REGISTER_LIBRARIES_EARLY)
805
        GC_cond_register_dynamic_libraries();
27,935✔
806
#   endif
807

808
#   ifndef NO_CLOCK
809
      if (GC_PRINT_STATS_FLAG)
27,935✔
810
        GET_TIME(start_time);
×
811
#   endif
812

813
#   if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
814
      GC_process_togglerefs();
27,935✔
815
#   endif
816
#   ifdef THREADS
817
      if (GC_on_collection_event)
27,935✔
818
        GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
×
819
#   endif
820
    STOP_WORLD();
27,935✔
821
#   ifdef THREADS
822
      if (GC_on_collection_event)
27,935✔
823
        GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
×
824
#   endif
825

826
#   ifdef THREAD_LOCAL_ALLOC
827
      GC_world_stopped = TRUE;
27,935✔
828
#   endif
829
        /* Output blank line for convenience here */
830
    GC_COND_LOG_PRINTF(
27,935✔
831
              "\n--> Marking for collection #%lu after %lu allocated bytes\n",
832
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
×
833
#   ifdef MAKE_BACK_GRAPH
834
      if (GC_print_back_height) {
835
        GC_build_back_graph();
836
      }
837
#   endif
838

839
    /* Mark from all roots.  */
840
        if (GC_on_collection_event)
27,935✔
841
          GC_on_collection_event(GC_EVENT_MARK_START);
×
842

843
        /* Minimize junk left in my registers and on the stack */
844
            GC_clear_a_few_frames();
27,935✔
845
            GC_noop6(0,0,0,0,0,0);
27,935✔
846

847
        GC_initiate_gc();
27,935✔
848
#       ifdef PARALLEL_MARK
849
          if (stop_func != GC_never_stop_func)
27,935✔
850
            GC_parallel_mark_disabled = TRUE;
17,245✔
851
#       endif
852
        for (i = 0; !(*stop_func)(); i++) {
7,248,555✔
853
          if (GC_mark_some(cold_gc_frame)) {
7,248,555✔
854
#           ifdef PARALLEL_MARK
855
              if (GC_parallel && GC_parallel_mark_disabled) {
27,935✔
856
                GC_COND_LOG_PRINTF("Stopped marking done after %d iterations"
9,142✔
857
                                   " with disabled parallel marker\n", i);
858
              }
859
#           endif
860
            i = -1;
27,935✔
861
            break;
27,935✔
862
          }
863
        }
864
#       ifdef PARALLEL_MARK
865
          GC_parallel_mark_disabled = FALSE;
27,935✔
866
#       endif
867

868
        if (i >= 0) {
27,935✔
869
          GC_COND_LOG_PRINTF("Abandoned stopped marking after"
×
870
                             " %d iterations\n", i);
871
          GC_deficit = i;       /* Give the mutator a chance.   */
×
872
#         ifdef THREAD_LOCAL_ALLOC
873
            GC_world_stopped = FALSE;
×
874
#         endif
875

876
#         ifdef THREADS
877
            if (GC_on_collection_event)
×
878
              GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
879
#         endif
880

881
          START_WORLD();
×
882

883
#         ifdef THREADS
884
            if (GC_on_collection_event)
×
885
              GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
886
#         endif
887

888
          /* TODO: Notify GC_EVENT_MARK_ABANDON */
889
          return FALSE;
27,938✔
890
        }
891

892
    GC_gc_no++;
27,935✔
893
    /* Check all debugged objects for consistency */
894
    if (GC_debugging_started) {
27,935✔
895
      (*GC_check_heap)();
249✔
896
    }
897
    if (GC_on_collection_event) {
27,935✔
898
      GC_on_collection_event(GC_EVENT_MARK_END);
×
899
#     ifdef THREADS
900
        GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
×
901
#     endif
902
    }
903
#   ifdef THREAD_LOCAL_ALLOC
904
      GC_world_stopped = FALSE;
27,935✔
905
#   endif
906

907
    START_WORLD();
27,935✔
908

909
#   ifdef THREADS
910
      if (GC_on_collection_event)
27,938✔
911
        GC_on_collection_event(GC_EVENT_POST_START_WORLD);
×
912
#   endif
913

914
#   ifndef NO_CLOCK
915
      if (GC_PRINT_STATS_FLAG) {
27,938✔
916
        unsigned long time_diff;
917
        unsigned total_time, divisor;
918
        CLOCK_TYPE current_time;
919

920
        GET_TIME(current_time);
×
921
        time_diff = MS_TIME_DIFF(current_time,start_time);
×
922

923
        /* Compute new world-stop delay total time */
924
        total_time = world_stopped_total_time;
×
925
        divisor = world_stopped_total_divisor;
×
926
        if (total_time > (((unsigned)-1) >> 1)
×
927
            || divisor >= MAX_TOTAL_TIME_DIVISOR) {
×
928
          /* Halve values if overflow occurs */
929
          total_time >>= 1;
×
930
          divisor >>= 1;
×
931
        }
932
        total_time += time_diff < (((unsigned)-1) >> 1) ?
×
933
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
×
934
        /* Update old world_stopped_total_time and its divisor */
935
        world_stopped_total_time = total_time;
×
936
        world_stopped_total_divisor = ++divisor;
×
937

938
        GC_ASSERT(divisor != 0);
×
939
        GC_log_printf("World-stopped marking took %lu ms %lu ns"
×
940
                      " (%u ms in average)\n",
941
                      time_diff, NS_FRAC_TIME_DIFF(current_time, start_time),
×
942
                      total_time / divisor);
943
      }
944
#   endif
945
    return TRUE;
27,938✔
946
}
947

948
/* Set all mark bits for the free list whose first entry is q   */
949
GC_INNER void GC_set_fl_marks(ptr_t q)
1,274,916✔
950
{
951
    if (q /* != NULL */) { /* CPPCHECK */
1,274,916✔
952
      struct hblk *h = HBLKPTR(q);
1,274,916✔
953
      struct hblk *last_h = h;
1,274,916✔
954
      hdr *hhdr = HDR(h);
1,274,916✔
955
      IF_PER_OBJ(word sz = hhdr->hb_sz;)
956

957
      for (;;) {
958
        word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
35,445,479✔
959

960
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
35,445,479✔
961
          set_mark_bit_from_hdr(hhdr, bit_no);
15,985,643✔
962
          ++hhdr -> hb_n_marks;
15,985,643✔
963
        }
964

965
        q = (ptr_t)obj_link(q);
35,445,479✔
966
        if (q == NULL)
35,445,479✔
967
          break;
1,274,916✔
968

969
        h = HBLKPTR(q);
34,170,563✔
970
        if (h != last_h) {
34,170,563✔
971
          last_h = h;
85,009✔
972
          hhdr = HDR(h);
85,009✔
973
          IF_PER_OBJ(sz = hhdr->hb_sz;)
974
        }
975
      }
34,170,563✔
976
    }
977
}
1,274,916✔
978

979
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
980
  /* Check that all mark bits for the free list whose first entry is    */
981
  /* (*pfreelist) are set.  Check skipped if points to a special value. */
982
  void GC_check_fl_marks(void **pfreelist)
21,009,919✔
983
  {
984
    /* TODO: There is a data race with GC_FAST_MALLOC_GRANS (which does */
985
    /* not do atomic updates to the free-list).  The race seems to be   */
986
    /* harmless, and for now we just skip this check in case of TSan.   */
987
#   if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
988
      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
21,009,919✔
989
                /* Atomic operations are used because the world is running. */
990
      AO_t *prev;
991
      AO_t *p;
992

993
      if ((word)list <= HBLKSIZE) return;
21,009,919✔
994

995
      prev = (AO_t *)pfreelist;
1,274,540✔
996
      for (p = list; p != NULL;) {
37,983,690✔
997
        AO_t *next;
998

999
        if (!GC_is_marked(p)) {
35,434,605✔
1000
          ABORT_ARG2("Unmarked local free list entry",
×
1001
                     ": object %p on list %p", (void *)p, (void *)list);
1002
        }
1003

1004
        /* While traversing the free-list, it re-reads the pointer to   */
1005
        /* the current node before accepting its next pointer and       */
1006
        /* bails out if the latter has changed.  That way, it won't     */
1007
        /* try to follow the pointer which might be been modified       */
1008
        /* after the object was returned to the client.  It might       */
1009
        /* perform the mark-check on the just allocated object but      */
1010
        /* that should be harmless.                                     */
1011
        next = (AO_t *)AO_load_acquire_read(p);
35,434,610✔
1012
        if (AO_load(prev) != (AO_t)p)
35,434,610✔
1013
          break;
×
1014
        prev = p;
35,434,610✔
1015
        p = next;
35,434,610✔
1016
      }
1017
#   else
1018
      /* FIXME: Not implemented (just skipped). */
1019
      (void)pfreelist;
1020
#   endif
1021
  }
1022
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1023

1024
/* Clear all mark bits for the free list whose first entry is q */
1025
/* Decrement GC_bytes_found by number of bytes on free list.    */
1026
STATIC void GC_clear_fl_marks(ptr_t q)
50,343✔
1027
{
1028
      struct hblk *h = HBLKPTR(q);
50,343✔
1029
      struct hblk *last_h = h;
50,343✔
1030
      hdr *hhdr = HDR(h);
50,343✔
1031
      word sz = hhdr->hb_sz; /* Normally set only once. */
50,343✔
1032

1033
      for (;;) {
1034
        word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
4,540,457✔
1035

1036
        if (mark_bit_from_hdr(hhdr, bit_no)) {
4,540,457✔
1037
          size_t n_marks = hhdr -> hb_n_marks;
1,133,106✔
1038

1039
          GC_ASSERT(n_marks != 0);
1,133,106✔
1040
          clear_mark_bit_from_hdr(hhdr, bit_no);
1,133,106✔
1041
          n_marks--;
1,133,106✔
1042
#         ifdef PARALLEL_MARK
1043
            /* Appr. count, don't decrement to zero! */
1044
            if (0 != n_marks || !GC_parallel) {
1,133,106✔
1045
              hhdr -> hb_n_marks = n_marks;
1,132,644✔
1046
            }
1047
#         else
1048
            hhdr -> hb_n_marks = n_marks;
1049
#         endif
1050
        }
1051
        GC_bytes_found -= sz;
4,540,457✔
1052

1053
        q = (ptr_t)obj_link(q);
4,540,457✔
1054
        if (q == NULL)
4,540,457✔
1055
          break;
50,343✔
1056

1057
        h = HBLKPTR(q);
4,490,114✔
1058
        if (h != last_h) {
4,490,114✔
1059
          last_h = h;
471,353✔
1060
          hhdr = HDR(h);
471,353✔
1061
          sz = hhdr->hb_sz;
471,353✔
1062
        }
1063
      }
4,490,114✔
1064
}
50,343✔
1065

1066
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1067
  void GC_check_tls(void);
1068
#endif
1069

1070
GC_on_heap_resize_proc GC_on_heap_resize = 0;
1071

1072
/* Used for logging only. */
1073
GC_INLINE int GC_compute_heap_usage_percent(void)
×
1074
{
1075
  word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
×
1076
  word heap_sz = GC_heapsize - GC_unmapped_bytes;
×
1077
# if defined(CPPCHECK)
1078
    word limit = (GC_WORD_MAX >> 1) / 50; /* to avoid a false positive */
1079
# else
1080
    const word limit = GC_WORD_MAX / 100;
×
1081
# endif
1082

1083
  return used >= heap_sz ? 0 : used < limit ?
×
1084
                (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
×
1085
}
1086

1087
#define GC_DBGLOG_PRINT_HEAP_IN_USE() \
1088
  GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1089
                   GC_compute_heap_usage_percent(), \
1090
                   TO_KiB_UL(GC_composite_in_use), \
1091
                   TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1092

1093
/* Finish up a collection.  Assumes mark bits are consistent, but the   */
1094
/* world is otherwise running.                                          */
1095
STATIC void GC_finish_collection(void)
27,938✔
1096
{
1097
#   ifndef NO_CLOCK
1098
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
27,938✔
1099
      CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
27,938✔
1100
#   endif
1101

1102
    GC_ASSERT(I_HOLD_LOCK());
27,938✔
1103
#   if defined(GC_ASSERTIONS) \
1104
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
1105
        /* Check that we marked some of our own data.           */
1106
        /* TODO: Add more checks. */
1107
        GC_check_tls();
27,938✔
1108
#   endif
1109

1110
#   ifndef NO_CLOCK
1111
      if (GC_print_stats)
27,942✔
1112
        GET_TIME(start_time);
×
1113
#   endif
1114
    if (GC_on_collection_event)
27,942✔
1115
      GC_on_collection_event(GC_EVENT_RECLAIM_START);
×
1116

1117
#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1118
      if (GC_bytes_found > 0)
27,942✔
1119
        GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
27,635✔
1120
#   endif
1121
    GC_bytes_found = 0;
27,942✔
1122
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1123
        if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
27,942✔
1124
          GC_print_address_map();
×
1125
        }
1126
#   endif
1127
    COND_DUMP;
27,942✔
1128
    if (GC_find_leak) {
27,942✔
1129
      /* Mark all objects on the free list.  All objects should be      */
1130
      /* marked when we're done.                                        */
1131
      word size;        /* current object size  */
1132
      unsigned kind;
1133
      ptr_t q;
1134

1135
      for (kind = 0; kind < GC_n_kinds; kind++) {
330✔
1136
        for (size = 1; size <= MAXOBJGRANULES; size++) {
34,056✔
1137
          q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
33,792✔
1138
          if (q != NULL)
33,792✔
1139
            GC_set_fl_marks(q);
538✔
1140
        }
1141
      }
1142
      GC_start_reclaim(TRUE);
66✔
1143
        /* The above just checks; it doesn't really reclaim anything.   */
1144
    }
1145

1146
#   ifndef GC_NO_FINALIZATION
1147
      GC_finalize();
27,942✔
1148
#   endif
1149
#   ifndef NO_CLOCK
1150
      if (GC_print_stats)
27,942✔
1151
        GET_TIME(finalize_time);
×
1152
#   endif
1153

1154
    if (GC_print_back_height) {
27,942✔
1155
#     ifdef MAKE_BACK_GRAPH
1156
        GC_traverse_back_graph();
1157
#     elif !defined(SMALL_CONFIG)
1158
        GC_err_printf("Back height not available: "
×
1159
                      "Rebuild collector with -DMAKE_BACK_GRAPH\n");
1160
#     endif
1161
    }
1162

1163
    /* Clear free list mark bits, in case they got accidentally marked   */
1164
    /* (or GC_find_leak is set and they were intentionally marked).      */
1165
    /* Also subtract memory remaining from GC_bytes_found count.         */
1166
    /* Note that composite objects on free list are cleared.             */
1167
    /* Thus accidentally marking a free list is not a problem;  only     */
1168
    /* objects on the list itself will be marked, and that's fixed here. */
1169
    {
1170
      word size;        /* current object size          */
1171
      ptr_t q;          /* pointer to current object    */
1172
      unsigned kind;
1173

1174
      for (kind = 0; kind < GC_n_kinds; kind++) {
157,117✔
1175
        for (size = 1; size <= MAXOBJGRANULES; size++) {
16,663,575✔
1176
          q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
16,534,400✔
1177
          if (q != NULL)
16,534,400✔
1178
            GC_clear_fl_marks(q);
50,343✔
1179
        }
1180
      }
1181
    }
1182

1183
    GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
27,942✔
1184
                          (long)GC_bytes_found);
1185

1186
    /* Reconstruct free lists to contain everything not marked */
1187
    GC_start_reclaim(FALSE);
27,942✔
1188

1189
#   ifdef USE_MUNMAP
1190
      if (GC_unmap_threshold > 0 /* unmapping enabled? */
27,942✔
1191
          && EXPECT(GC_gc_no != 1, TRUE)) /* do not unmap during GC init */
27,942✔
1192
        GC_unmap_old(GC_unmap_threshold);
27,910✔
1193

1194
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
27,942✔
1195
#   endif
1196
    GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
27,942✔
1197
    GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB ("
27,942✔
1198
                     IF_USE_MUNMAP("+ %lu KiB unmapped ")
1199
                     "+ %lu KiB internal)\n",
1200
                     (unsigned long)GC_gc_no, (long)GC_bytes_found,
1201
                     TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
×
1202
                     COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
×
1203
                     TO_KiB_UL(GC_our_mem_bytes - GC_heapsize
×
1204
                               + sizeof(GC_arrays)));
1205
    GC_DBGLOG_PRINT_HEAP_IN_USE();
27,942✔
1206
    if (GC_is_full_gc) {
27,942✔
1207
        GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
12,114✔
1208
        GC_need_full_gc = FALSE;
12,114✔
1209
    } else {
1210
        GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
31,656✔
1211
                          > min_bytes_allocd() + GC_large_free_bytes;
15,828✔
1212
    }
1213

1214
    /* Reset or increment counters for next cycle */
1215
    GC_n_attempts = 0;
27,942✔
1216
    GC_is_full_gc = FALSE;
27,942✔
1217
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
27,942✔
1218
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
27,942✔
1219
    GC_bytes_allocd = 0;
27,942✔
1220
    GC_bytes_dropped = 0;
27,942✔
1221
    GC_bytes_freed = 0;
27,942✔
1222
    GC_finalizer_bytes_freed = 0;
27,942✔
1223

1224
    if (GC_on_collection_event)
27,942✔
1225
      GC_on_collection_event(GC_EVENT_RECLAIM_END);
×
1226
#   ifndef NO_CLOCK
1227
      if (GC_print_stats) {
27,942✔
1228
        CLOCK_TYPE done_time;
1229

1230
        GET_TIME(done_time);
×
1231
#       if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1232
          /* A convenient place to output finalization statistics.      */
1233
          GC_print_finalization_stats();
×
1234
#       endif
1235
        GC_log_printf("Finalize and initiate sweep took %lu ms %lu ns"
×
1236
                      " + %lu ms %lu ns\n",
1237
                      MS_TIME_DIFF(finalize_time, start_time),
×
1238
                      NS_FRAC_TIME_DIFF(finalize_time, start_time),
×
1239
                      MS_TIME_DIFF(done_time, finalize_time),
×
1240
                      NS_FRAC_TIME_DIFF(done_time, finalize_time));
×
1241
      }
1242
#   elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1243
      if (GC_print_stats)
1244
        GC_print_finalization_stats();
1245
#   endif
1246
}
27,942✔
1247

1248
STATIC word GC_heapsize_at_forced_unmap = 0;
1249
                                /* accessed with the allocation lock held */
1250

1251
/* If stop_func == 0 then GC_default_stop_func is used instead.         */
1252
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
2,470✔
1253
                                         GC_bool force_unmap)
1254
{
1255
    GC_bool result;
1256
    IF_USE_MUNMAP(int old_unmap_threshold;)
1257
    IF_CANCEL(int cancel_state;)
1258

1259
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
2,470✔
1260
    if (GC_debugging_started) GC_print_all_smashed();
2,470✔
1261
    GC_INVOKE_FINALIZERS();
2,470✔
1262
    LOCK();
2,470✔
1263
    if (force_unmap) {
2,470✔
1264
      /* Record current heap size to make heap growth more conservative */
1265
      /* afterwards (as if the heap is growing from zero size again).   */
1266
      GC_heapsize_at_forced_unmap = GC_heapsize;
70✔
1267
    }
1268
    DISABLE_CANCEL(cancel_state);
2,470✔
1269
#   ifdef USE_MUNMAP
1270
      old_unmap_threshold = GC_unmap_threshold;
2,470✔
1271
      if (force_unmap ||
4,870✔
1272
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
2,400✔
1273
        GC_unmap_threshold = 1; /* unmap as much as possible */
70✔
1274
#   endif
1275
    ENTER_GC();
2,470✔
1276
    /* Minimize junk left in my registers */
1277
      GC_noop6(0,0,0,0,0,0);
2,470✔
1278
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
2,470✔
1279
                                     GC_default_stop_func);
1280
    EXIT_GC();
2,473✔
1281
    IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
2,473✔
1282
    RESTORE_CANCEL(cancel_state);
2,473✔
1283
    UNLOCK();
2,473✔
1284
    if (result) {
2,471✔
1285
        if (GC_debugging_started) GC_print_all_smashed();
2,308✔
1286
        GC_INVOKE_FINALIZERS();
2,308✔
1287
    }
1288
    return result;
2,471✔
1289
}
1290

1291
/* Externally callable routines to invoke full, stop-the-world collection. */
1292

1293
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
×
1294
{
1295
    GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
×
1296
    return (int)GC_try_to_collect_general(stop_func, FALSE);
×
1297
}
1298

1299
GC_API void GC_CALL GC_gcollect(void)
2,400✔
1300
{
1301
    /* 0 is passed as stop_func to get GC_default_stop_func value       */
1302
    /* while holding the allocation lock (to prevent data races).       */
1303
    (void)GC_try_to_collect_general(0, FALSE);
2,400✔
1304
    if (get_have_errors())
2,401✔
1305
      GC_print_all_errors();
56✔
1306
}
2,401✔
1307

1308
GC_API void GC_CALL GC_gcollect_and_unmap(void)
70✔
1309
{
1310
    /* Collect and force memory unmapping to OS. */
1311
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
70✔
1312
}
70✔
1313

1314
#ifdef USE_PROC_FOR_LIBRARIES
1315
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
1316
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
1317
  {
1318
    GC_ASSERT(I_HOLD_LOCK());
1319
    GC_ASSERT(p != NULL);
1320
    if (GC_n_memory >= MAX_HEAP_SECTS)
1321
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1322
    GC_our_memory[GC_n_memory].hs_start = p;
1323
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
1324
    GC_n_memory++;
1325
    GC_our_mem_bytes += bytes;
1326
  }
1327
#endif

/* Use the chunk of memory starting at p of size bytes as part of the heap. */
/* Assumes p is HBLKSIZE aligned, bytes argument is a multiple of HBLKSIZE. */
STATIC void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;
    size_t old_capacity = 0;
    void *old_heap_sects = NULL;
#   ifdef GC_ASSERTIONS
      unsigned i;
#   endif

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)p % HBLKSIZE == 0);
    GC_ASSERT(bytes % HBLKSIZE == 0);
    GC_ASSERT(bytes > 0);
    GC_ASSERT(GC_all_nils != NULL);

    if (EXPECT(GC_n_heap_sects == GC_capacity_heap_sects, FALSE)) {
      /* Allocate new GC_heap_sects with sufficient capacity.   */
#     ifndef INITIAL_HEAP_SECTS
#       define INITIAL_HEAP_SECTS 32
#     endif
      size_t new_capacity = GC_n_heap_sects > 0 ?
                (size_t)GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
      void *new_heap_sects =
                GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));

      if (NULL == new_heap_sects) {
        /* Retry with smaller yet sufficient capacity.  */
        new_capacity = (size_t)GC_n_heap_sects + INITIAL_HEAP_SECTS;
        new_heap_sects =
                GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
        if (NULL == new_heap_sects)
          ABORT("Insufficient memory for heap sections");
      }
      old_capacity = GC_capacity_heap_sects;
      old_heap_sects = GC_heap_sects;
      /* Transfer GC_heap_sects contents to the newly allocated array.  */
      if (GC_n_heap_sects > 0)
        BCOPY(old_heap_sects, new_heap_sects,
              GC_n_heap_sects * sizeof(struct HeapSect));
      GC_capacity_heap_sects = new_capacity;
      GC_heap_sects = (struct HeapSect *)new_heap_sects;
      GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
                         (unsigned long)new_capacity);
    }

    while (EXPECT((word)p <= HBLKSIZE, FALSE)) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (EXPECT(endp <= (word)p, FALSE)) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (EXPECT(NULL == phdr, FALSE)) {
        /* This is extremely unlikely. Can't add it.  This will         */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
#   ifdef GC_ASSERTIONS
      /* Ensure no intersection between sections.       */
      for (i = 0; i < GC_n_heap_sects; i++) {
        word hs_start = (word)GC_heap_sects[i].hs_start;
        word hs_end = hs_start + GC_heap_sects[i].hs_bytes;

        GC_ASSERT(!((hs_start <= (word)p && (word)p < hs_end)
                    || (hs_start < endp && endp <= hs_end)
                    || ((word)p < hs_start && hs_end < endp)));
      }
#   endif
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;

    if ((word)p <= (word)GC_least_plausible_heap_addr
        || EXPECT(NULL == GC_least_plausible_heap_addr, FALSE)) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if (endp > (word)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }

    if (EXPECT(old_capacity > 0, FALSE)) {
#     ifndef GWW_VDB
        /* Recycling may call GC_add_to_heap() again but should not     */
        /* cause resizing of GC_heap_sects.                             */
        GC_scratch_recycle_no_gww(old_heap_sects,
                                  old_capacity * sizeof(struct HeapSect));
#     else
        /* TODO: implement GWW-aware recycling as in alloc_mark_stack */
        GC_noop1((word)old_heap_sects);
#     endif
    }
}
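
/* Illustrative sketch only (not a real call site), roughly mirroring       */
/* what GC_expand_hp_inner() below does: the start address must be          */
/* HBLKSIZE-aligned and the size a multiple of HBLKSIZE (the real caller    */
/* also page-rounds the size and registers the block first):                */
/*                                                                          */
/*   size_t sz = ROUNDUP_PAGESIZE(16 * HBLKSIZE);                           */
/*   struct hblk *space = GET_MEM(sz);                                      */
/*   if (space != NULL) GC_add_to_heap(space, sz);                          */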

#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
              (unsigned long)GC_heapsize /*, */
              COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %d from %p to %p %u/%lu blacklisted\n",
                i, (void *)start, (void *)&start[len],
                nbl, (unsigned long)divHBLKSZ(len));
    }
  }
#endif

void * GC_least_plausible_heap_addr = (void *)GC_WORD_MAX;
void * GC_greatest_plausible_heap_addr = 0;

STATIC word GC_max_heapsize = 0;

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}

word GC_max_retries = 0;

GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
{
  size_t page_offset;
  size_t displ = 0;
  size_t recycled_bytes;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == ptr) return;

  GC_ASSERT(bytes != 0);
  GC_ASSERT(GC_page_size != 0);
  /* TODO: Assert correct memory flags if GWW_VDB */
  page_offset = (word)ptr & (GC_page_size - 1);
  if (page_offset != 0)
    displ = GC_page_size - page_offset;
  recycled_bytes = bytes > displ ? (bytes - displ) & ~(GC_page_size - 1) : 0;
  GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
                (unsigned long)recycled_bytes, (unsigned long)bytes, ptr);
  if (recycled_bytes > 0)
    GC_add_to_heap((struct hblk *)((word)ptr + displ), recycled_bytes);
}
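
/* Worked example with hypothetical numbers (assuming GC_page_size, and     */
/* HBLKSIZE, of 4096): for ptr at offset 256 within a page and bytes equal  */
/* to 10000, page_offset is 256, displ is 4096 - 256 = 3840, and            */
/* recycled_bytes is (10000 - 3840) & ~4095 = 4096; i.e. exactly one whole  */
/* page starting at ptr + 3840 is handed back via GC_add_to_heap().         */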

/* This explicitly increases the size of the heap.  It is used          */
/* internally, but may also be invoked from GC_expand_hp by the user.   */
/* The argument is in units of HBLKSIZE (zero is treated as 1).         */
/* Returns FALSE on failure.                                            */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    size_t bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect   */
                                /* the heap to expand soon.             */

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_page_size != 0);
    if (0 == n) n = 1;
    bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
    GC_DBGLOG_PRINT_HEAP_IN_USE();
    if (GC_max_heapsize != 0
        && (GC_max_heapsize < (word)bytes
            || GC_heapsize > GC_max_heapsize - (word)bytes)) {
        /* Exceeded self-imposed limit */
        return FALSE;
    }
    space = GET_MEM(bytes);
    if (EXPECT(NULL == space, FALSE)) {
        WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", bytes >> 10);
        return FALSE;
    }
    GC_add_to_our_memory((ptr_t)space, bytes);
    GC_last_heap_growth_gc_no = GC_gc_no;
    GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                      TO_KiB_UL(GC_heapsize + bytes),
                      (unsigned long)GC_bytes_allocd);

    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0
            && (word)GC_last_heap_addr < (word)space)) {
        /* Assume the heap is growing up. */
        word new_limit = (word)space + (word)bytes + expansion_slop;
        if (new_limit > (word)space
            && (word)GC_greatest_plausible_heap_addr < new_limit)
          GC_greatest_plausible_heap_addr = (void *)new_limit;
    } else {
        /* Heap is growing down. */
        word new_limit = (word)space - expansion_slop;
        if (new_limit < (word)space
            && (word)GC_least_plausible_heap_addr > new_limit)
          GC_least_plausible_heap_addr = (void *)new_limit;
    }
    GC_last_heap_addr = (ptr_t)space;

    GC_add_to_heap(space, bytes);
    if (GC_on_heap_resize)
        (*GC_on_heap_resize)(GC_heapsize);

    return TRUE;
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    word n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
    word old_heapsize;
    GC_bool result;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    old_heapsize = GC_heapsize;
    result = GC_expand_hp_inner(n_blocks);
    if (result) {
      GC_requested_heapsize += bytes;
      if (GC_dont_gc) {
        /* Do not call WARN if the heap growth is intentional.  */
        GC_ASSERT(GC_heapsize >= old_heapsize);
        GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
      }
    }
    UNLOCK();
    return (int)result;
}

GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* The minimum value of the ratio of allocated bytes since the latest   */
/* GC to the number of finalizers created since that GC which triggers  */
/* a collection instead of a heap expansion.  Has no effect in the      */
/* incremental mode.                                                    */
#if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
  STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
#else
  STATIC word GC_allocd_bytes_per_finalizer = 10000;
#endif

GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}

GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}
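
/* Client-side sketch (the value is arbitrary): raise the threshold so      */
/* that finalizer registration triggers a full collection less eagerly,     */
/* e.g. GC_set_allocd_bytes_per_finalizer(50000); the current setting can   */
/* be read back with GC_get_allocd_bytes_per_finalizer().                   */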

static word last_fo_entries = 0;
static word last_bytes_finalized = 0;

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.  The flags argument */
/* should be IGNORE_OFF_PAGE or 0.                                      */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      unsigned flags,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_is_initialized);
    DISABLE_CANCEL(cancel_state);
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0)
         || (GC_fo_entries > last_fo_entries
             && (last_bytes_finalized | GC_bytes_finalized) != 0
             && (GC_fo_entries - last_fo_entries)
                * GC_allocd_bytes_per_finalizer > GC_bytes_allocd)
         || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        last_fo_entries = GC_fo_entries;
        last_bytes_finalized = GC_bytes_finalized;
        RESTORE_CANCEL(cancel_state);
        return TRUE;
      }
    }

    blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                        / (HBLKSIZE * GC_free_space_divisor)
                    + needed_blocks;
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if ((flags & IGNORE_OFF_PAGE) != 0) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
      if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
        blocks_to_get = divHBLKSZ(GC_WORD_MAX);
    } else if (blocks_to_get < MINHINCR) {
      blocks_to_get = MINHINCR;
    }

    if (GC_max_heapsize > GC_heapsize) {
      word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
      if (blocks_to_get > max_get_blocks)
        blocks_to_get = max_get_blocks > needed_blocks
                        ? max_get_blocks : needed_blocks;
    }

#   ifdef USE_MUNMAP
      if (GC_unmap_threshold > 1) {
        /* Return as much memory to the OS as possible before   */
        /* trying to get memory from it.                        */
        GC_unmap_old(0);
      }
#   endif
    if (!GC_expand_hp_inner(blocks_to_get)
        && (blocks_to_get == needed_blocks
            || !GC_expand_hp_inner(needed_blocks))) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning).     */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory!  Trying to continue...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
#         ifdef USE_MUNMAP
            GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#         endif
          WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
               " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return FALSE;
      }
    } else if (GC_fail_count) {
      GC_COND_LOG_PRINTF("Memory available again...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return TRUE;
}
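
/* Calling-convention sketch (GC_allocobj below is a real caller): pass     */
/* retry as FALSE on the first attempt and TRUE on subsequent ones, and     */
/* give up as soon as the function returns FALSE:                           */
/*                                                                          */
/*   GC_bool retry = FALSE;                                                 */
/*   while (still_short_of_blocks()) {    (hypothetical predicate)          */
/*     if (!GC_collect_or_expand(needed_blocks, 0, retry)) return NULL;     */
/*     retry = TRUE;                                                        */
/*   }                                                                      */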

/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &GC_obj_kinds[kind].ok_freelist[gran];
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_is_initialized);
    if (0 == gran) return NULL;

    while (NULL == *flh) {
      ENTER_GC();
#     ifndef GC_DISABLE_INCREMENTAL
        if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED
            && !GC_dont_gc) {
          /* True incremental mode, not just generational.      */
          /* Do our share of marking work.                      */
          GC_collect_a_little_inner(1);
        }
#     endif
      /* Sweep blocks for objects of this size */
        GC_ASSERT(!GC_is_full_gc
                  || NULL == GC_obj_kinds[kind].ok_reclaim_list
                  || NULL == GC_obj_kinds[kind].ok_reclaim_list[gran]);
        GC_continue_reclaim(gran, kind);
      EXIT_GC();
#     if defined(CPPCHECK)
        GC_noop1((word)&flh);
#     endif
      if (NULL == *flh) {
        GC_new_hblk(gran, kind);
#       if defined(CPPCHECK)
          GC_noop1((word)&flh);
#       endif
        if (NULL == *flh) {
          ENTER_GC();
          if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
              && !tried_minor && !GC_dont_gc) {
            GC_collect_a_little_inner(1);
            tried_minor = TRUE;
          } else {
            if (!GC_collect_or_expand(1, 0 /* flags */, retry)) {
              EXIT_GC();
              return NULL;
            }
            retry = TRUE;
          }
          EXIT_GC();
        }
      }
    }
    /* Successful allocation; reset failure count.      */
    GC_fail_count = 0;

    return (ptr_t)(*flh);
}