• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bdwgc / bdwgc / 2053

22 Feb 2026 05:48AM UTC coverage: 77.233% (+3.3%) from 73.894%
2053

push

travis-ci

ivmai
Fix missing GC_ATTR_NONNULL for API functions
(fix of commit d012f92c)

* include/gc/gc.h (GC_exclude_static_roots, GC_add_roots): Add
`GC_ATTR_NONNULL` attribute for the appropriate arguments.
* include/gc/gc.h [GC_WIN32_THREADS && (!GC_PTHREADS || GC_BUILD
|| GC_WINDOWS_H_INCLUDED) && (!GC_NO_THREAD_DECLS || GC_BUILD)
&& !GC_DONT_INCL_WINDOWS_H] (GC_CreateThread, GC_beginthreadex):
Likewise.
* include/gc/gc_inline.h (GC_generic_malloc_many): Likewise.
* include/gc/gc_mark.h (GC_mark_and_push, GC_new_proc,
GC_new_proc_inner): Likewise.
* include/gc/gc_pthread_redirects.h [GC_PTHREADS
&& !GC_PTHREAD_REDIRECTS_ONLY] (GC_pthread_create): Likewise.
* include/private/gc_priv.h (NONNULL_PROC_NOT_ZERO): New macro.
* mallocx.c (GC_generic_malloc_many): Add assertion that `result` is
non-null.
* mark_rts.c (GC_add_roots_inner): Add assertion that `b` is non-null.
* mark_rts.c (GC_exclude_static_roots_inner): Add assertion that
`start` is non-null.
* misc.c (GC_new_proc_inner): Add assertion that `proc` is non-zero.
* pthread_support.c (GC_wrap_pthread_create): Add assertion that
`start_routine` is non-zero.

3 of 3 new or added lines in 2 files covered. (100.0%)

128 existing lines in 9 files now uncovered.

6873 of 8899 relevant lines covered (77.23%)

17354920.53 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

70.28
/finalize.c
1
/*
2
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 2007 Free Software Foundation, Inc.
6
 * Copyright (c) 2008-2025 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 */
17

18
#include "private/gc_pmark.h"

#ifndef GC_NO_FINALIZATION
#  include "gc/javaxfc.h" /*< to get `GC_finalize_all()` as `extern "C"` */

/*
 * Type of mark procedure used for marking from finalizable object.
 * This procedure normally does not mark the object, only its descendants.
 */
typedef void (*finalization_mark_proc)(ptr_t /* `finalizable_obj_ptr` */);

/*
 * Hash an address into a table of `size` buckets (`size` is a power of
 * two; `log_size` is its logarithm).  The low 3 address bits are
 * discarded, and a shifted copy is XOR-ed in to spread nearby keys.
 */
#  define HASH3(addr, size, log_size)                               \
    ((size_t)((ADDR(addr) >> 3) ^ (ADDR(addr) >> (3 + (log_size)))) \
     & ((size) - (size_t)1))
#  define HASH2(addr, log_size) HASH3(addr, (size_t)1 << (log_size), log_size)

/* Common prolog shared by the chained hash-table entry types below. */
struct hash_chain_entry {
  GC_hidden_pointer hidden_key;
  struct hash_chain_entry *next;
};

struct disappearing_link {
  struct hash_chain_entry prolog;
#  define dl_hidden_link prolog.hidden_key /*< field to be cleared */
#  define dl_next(x) (struct disappearing_link *)((x)->prolog.next)
#  define dl_set_next(x, y) \
    (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
  GC_hidden_pointer dl_hidden_obj; /*< pointer to object base */
};

struct finalizable_object {
  struct hash_chain_entry prolog;
  /*
   * Pointer to object base.  No longer hidden once object is on
   * `finalize_now` queue.
   */
#  define fo_hidden_base prolog.hidden_key
#  define fo_next(x) (struct finalizable_object *)((x)->prolog.next)
#  define fo_set_next(x, y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
  GC_finalization_proc fo_fn;          /*< the finalizer */
  finalization_mark_proc fo_mark_proc; /*< mark-through procedure */
  ptr_t fo_client_data;
  size_t fo_object_sz; /*< in bytes */
};

#  ifdef AO_HAVE_store
/*
 * Update `finalize_now` atomically as `GC_should_invoke_finalizers`
 * does not acquire the allocator lock.
 */
#    define SET_FINALIZE_NOW(fo) \
      GC_cptr_store((volatile ptr_t *)&GC_fnlz_roots.finalize_now, (ptr_t)(fo))
#  else
#    define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
#  endif /* !AO_HAVE_store */
73

74
/*
 * Push the finalization-related roots (the disappearing-link table
 * head(s) and `GC_fnlz_roots`) so that these structures themselves
 * are retained across collections.
 */
GC_API void GC_CALL
GC_push_finalizer_structures(void)
{
  GC_ASSERT(ADDR(&GC_dl_hashtbl.head) % ALIGNMENT == 0);
  GC_ASSERT(ADDR(&GC_fnlz_roots) % ALIGNMENT == 0);
#  ifndef GC_LONG_REFS_NOT_NEEDED
  GC_ASSERT(ADDR(&GC_ll_hashtbl.head) % ALIGNMENT == 0);
  GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
#  endif
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
  /* `GC_toggleref_arr` is pushed specially by `GC_mark_togglerefs`. */
}
87

88
/*
 * Threshold of `log_size` to initiate full collection before growing
 * a hash table.
 */
#  ifndef GC_ON_GROW_LOG_SIZE_MIN
#    define GC_ON_GROW_LOG_SIZE_MIN LOG_HBLKSIZE
#  endif

/*
 * Ensure the hash table has enough capacity.  `*table_ptr` is a pointer
 * to an array of hash headers.  `*log_size_ptr` is the log of its current
 * size.  We update both `*table_ptr` and `*log_size_ptr` on success.
 */
STATIC void
GC_grow_table(struct hash_chain_entry ***table_ptr, unsigned *log_size_ptr,
              const size_t *entries_ptr)
{
  size_t i;
  struct hash_chain_entry *p;
  unsigned log_old_size = *log_size_ptr;
  unsigned log_new_size = log_old_size + 1;
  /* A not-yet-allocated table is treated as having zero size. */
  size_t old_size = NULL == *table_ptr ? 0 : (size_t)1 << log_old_size;
  size_t new_size = (size_t)1 << log_new_size;
  /* FIXME: Power-of-two size often gets rounded up to one more page. */
  struct hash_chain_entry **new_table;

  GC_ASSERT(I_HOLD_LOCK());
  /*
   * Avoid growing the table if at least 25% of its entries could be
   * deleted by enforcing a collection.  Ignored for small tables.
   * In the incremental mode we skip this optimization, as we want to
   * avoid triggering a full collection whenever possible.
   */
  if (log_old_size >= (unsigned)GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
    IF_CANCEL(int cancel_state;)

    DISABLE_CANCEL(cancel_state);
    GC_gcollect_inner();
    RESTORE_CANCEL(cancel_state);
    /* `GC_finalize` might decrease entries value. */
    if (*entries_ptr < ((size_t)1 << log_old_size) - (*entries_ptr >> 2))
      return;
  }

  new_table = (struct hash_chain_entry **)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
      new_size * sizeof(struct hash_chain_entry *), NORMAL);
  if (NULL == new_table) {
    if (NULL == *table_ptr) {
      ABORT("Insufficient space for initial table allocation");
    } else {
      /* Growing is best-effort; keep using the old (smaller) table. */
      return;
    }
  }
  /* Rehash every chained entry of the old table into the new one. */
  for (i = 0; i < old_size; i++) {
    for (p = (*table_ptr)[i]; p != NULL;) {
      ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key);
      struct hash_chain_entry *next = p->next;
      size_t new_hash = HASH3(real_key, new_size, log_new_size);

      p->next = new_table[new_hash];
      GC_dirty(p);
      new_table[new_hash] = p;
      p = next;
    }
  }
  *log_size_ptr = log_new_size;
  *table_ptr = new_table;
  GC_dirty(new_table); /*< entire object */
}
157

158
GC_API int GC_CALL
159
GC_register_disappearing_link(void **link)
63✔
160
{
161
  ptr_t base;
162

163
  base = (ptr_t)GC_base(link);
63✔
164
  if (NULL == base)
63✔
165
    ABORT("Bad arg to GC_register_disappearing_link");
×
166
  return GC_general_register_disappearing_link(link, base);
63✔
167
}
168

169
/*
 * Register (or update) the disappearing link `link` -> `obj` in the
 * given hash table.  Returns `GC_SUCCESS` on insertion, `GC_DUPLICATE`
 * if `link` is already present (its target object is then updated),
 * `GC_NO_MEMORY` on allocation failure, or `GC_UNIMPLEMENTED` in the
 * find-leak mode.
 */
STATIC int
GC_register_disappearing_link_inner(struct dl_hashtbl_s *dl_hashtbl,
                                    void **link, const void *obj,
                                    const char *tbl_log_name)
{
  struct disappearing_link *curr_dl;
  size_t index;
  struct disappearing_link *new_dl;

  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(GC_find_leak_inner))
    return GC_UNIMPLEMENTED;
#  ifdef GC_ASSERTIONS
  /* Just check accessibility. */
  GC_noop1_ptr(*link);
#  endif
  LOCK();
  GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
  /* Create the table on first use, or grow it when over-full. */
  if (UNLIKELY(NULL == dl_hashtbl->head)
      || UNLIKELY(dl_hashtbl->entries > ((size_t)1 << dl_hashtbl->log_size))) {
    GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl->head,
                  &dl_hashtbl->log_size, &dl_hashtbl->entries);
    GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                       1U << dl_hashtbl->log_size);
  }
  index = HASH2(link, dl_hashtbl->log_size);
  for (curr_dl = dl_hashtbl->head[index]; curr_dl != 0;
       curr_dl = dl_next(curr_dl)) {
    if (curr_dl->dl_hidden_link == GC_HIDE_POINTER(link)) {
      /* Alternatively, `GC_HIDE_NZ_POINTER()` could be used instead. */
      curr_dl->dl_hidden_obj = GC_HIDE_POINTER(obj);
      UNLOCK();
      return GC_DUPLICATE;
    }
  }
  new_dl = (struct disappearing_link *)GC_INTERNAL_MALLOC(
      sizeof(struct disappearing_link), NORMAL);
  if (UNLIKELY(NULL == new_dl)) {
    GC_oom_func oom_fn = GC_oom_fn;
    UNLOCK();
    new_dl = (struct disappearing_link *)(*oom_fn)(
        sizeof(struct disappearing_link));
    if (NULL == new_dl) {
      return GC_NO_MEMORY;
    }
    /* It is not likely we will make it here, but... */
    LOCK();
    /* Recalculate `index` since the table may grow. */
    index = HASH2(link, dl_hashtbl->log_size);
    /* Check again that our disappearing link is not in the table. */
    for (curr_dl = dl_hashtbl->head[index]; curr_dl != NULL;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl->dl_hidden_link == GC_HIDE_POINTER(link)) {
        curr_dl->dl_hidden_obj = GC_HIDE_POINTER(obj);
        UNLOCK();
#  ifndef DBG_HDRS_ALL
        /* Free unused `new_dl` returned by `GC_oom_fn()`. */
        GC_free(new_dl);
#  endif
        return GC_DUPLICATE;
      }
    }
  }
  /* Fill in the new entry and link it at the head of its hash chain. */
  new_dl->dl_hidden_obj = GC_HIDE_POINTER(obj);
  new_dl->dl_hidden_link = GC_HIDE_POINTER(link);
  dl_set_next(new_dl, dl_hashtbl->head[index]);
  GC_dirty(new_dl);
  dl_hashtbl->head[index] = new_dl;
  dl_hashtbl->entries++;
  GC_dirty(dl_hashtbl->head + index);
  UNLOCK();
  return GC_SUCCESS;
}
242

243
GC_API int GC_CALL
244
GC_general_register_disappearing_link(void **link, const void *obj)
558,027✔
245
{
246
  if ((ADDR(link) & (ALIGNMENT - 1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
558,027✔
247
    ABORT("Bad arg to GC_general_register_disappearing_link");
×
248
  return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj, "dl");
558,027✔
249
}
250

251
#  ifdef DBG_HDRS_ALL
/* With debug headers the entry is not freed; it is just unlinked. */
#    define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
#  else
#    define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
#  endif

/* Unregisters given `link` and returns the link entry to free. */
GC_INLINE struct disappearing_link *
GC_unregister_disappearing_link_inner(struct dl_hashtbl_s *dl_hashtbl,
                                      void **link)
{
  struct disappearing_link *curr_dl;
  struct disappearing_link *prev_dl = NULL;
  size_t index;

  GC_ASSERT(I_HOLD_LOCK());
  /* The table might not have been allocated yet. */
  if (UNLIKELY(NULL == dl_hashtbl->head))
    return NULL;

  index = HASH2(link, dl_hashtbl->log_size);
  for (curr_dl = dl_hashtbl->head[index]; curr_dl;
       curr_dl = dl_next(curr_dl)) {
    if (curr_dl->dl_hidden_link == GC_HIDE_POINTER(link)) {
      /* Remove found entry from the table. */
      if (NULL == prev_dl) {
        dl_hashtbl->head[index] = dl_next(curr_dl);
        GC_dirty(dl_hashtbl->head + index);
      } else {
        dl_set_next(prev_dl, dl_next(curr_dl));
        GC_dirty(prev_dl);
      }
      dl_hashtbl->entries--;
      break;
    }
    prev_dl = curr_dl;
  }
  /* NULL if `link` was not found in the chain. */
  return curr_dl;
}
289

290
GC_API int GC_CALL
291
GC_unregister_disappearing_link(void **link)
277,974✔
292
{
293
  struct disappearing_link *curr_dl;
294

295
  if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
277,974✔
296
    /* Nothing to do. */
297
    return 0;
×
298
  }
299

300
  LOCK();
277,974✔
301
  curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
277,974✔
302
  UNLOCK();
277,974✔
303
  if (NULL == curr_dl)
277,974✔
304
    return 0;
×
305
  FREE_DL_ENTRY(curr_dl);
277,974✔
306
  return 1;
277,974✔
307
}
308

309
/*
 * Mark from one finalizable object using the specified mark procedure.
 * May not mark the object pointed to by `real_ptr` (i.e., it is the job
 * of the caller, if appropriate).  Note that this is called with the
 * mutator running.  This is safe only if the mutator (client) gets
 * the allocator lock to reveal hidden pointers.
 */
GC_INLINE void
GC_mark_fo(ptr_t real_ptr, finalization_mark_proc fo_mark_proc)
{
  GC_ASSERT(I_HOLD_LOCK());
  fo_mark_proc(real_ptr);
  /* Process objects pushed by the mark procedure. */
  while (!GC_mark_stack_empty())
    MARK_FROM_MARK_STACK();
}
325

326
/* Complete a collection in progress, if any. */
327
GC_INLINE void
328
GC_complete_ongoing_collection(void)
×
329
{
330
  if (UNLIKELY(GC_collection_in_progress())) {
×
331
    while (!GC_mark_some(NULL)) {
×
332
      /* Empty. */
333
    }
334
  }
335
}
×
336

337
/* Toggle-refs support. */

#  ifndef GC_TOGGLE_REFS_NOT_NEEDED
typedef union toggle_ref_u GCToggleRef;

/* Client callback deciding the fate of each toggle-ref; 0 if unset. */
STATIC GC_toggleref_func GC_toggleref_callback = 0;
343

344
/*
 * Invoke `GC_toggleref_callback` on every live toggle-ref and compact
 * the array in place: each entry is kept strong, kept weak (as a
 * hidden pointer), or dropped, as the callback directs.
 */
GC_INNER void
GC_process_togglerefs(void)
{
  size_t i;
  size_t new_size = 0;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  for (i = 0; i < GC_toggleref_array_size; ++i) {
    GCToggleRef *r = &GC_toggleref_arr[i];
    void *obj = r->strong_ref;

    /* An odd value means the entry currently holds a hidden (weak) ref. */
    if ((ADDR(obj) & 1) != 0) {
      obj = GC_REVEAL_POINTER(r->weak_ref);
      GC_ASSERT((ADDR(obj) & 1) == 0);
    }
    if (NULL == obj)
      continue;

    switch (GC_toggleref_callback(obj)) {
    case GC_TOGGLE_REF_DROP:
      break;
    case GC_TOGGLE_REF_STRONG:
      GC_toggleref_arr[new_size++].strong_ref = obj;
      needs_barrier = TRUE;
      break;
    case GC_TOGGLE_REF_WEAK:
      GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
      break;
    default:
      ABORT("Bad toggle-ref status returned by callback");
    }
  }

  if (new_size < GC_toggleref_array_size) {
    /* Zero the tail left over after the in-place compaction. */
    BZERO(&GC_toggleref_arr[new_size],
          (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
    GC_toggleref_array_size = new_size;
  }
  if (needs_barrier)
    GC_dirty(GC_toggleref_arr); /*< entire object */
}
386

387
STATIC void GC_normal_finalize_mark_proc(ptr_t);

/*
 * Mark the toggle-ref array itself and every object referenced by a
 * strong toggle-ref entry.  Weak (hidden) entries have the low bit set
 * and are skipped here.
 */
STATIC void
GC_mark_togglerefs(void)
{
  size_t i;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == GC_toggleref_arr)
    return;

  GC_set_mark_bit(GC_toggleref_arr);
  for (i = 0; i < GC_toggleref_array_size; ++i) {
    void *obj = GC_toggleref_arr[i].strong_ref;
    if (obj != NULL && (ADDR(obj) & 1) == 0) {
      /* Push and mark the object. */
      GC_mark_fo((ptr_t)obj, GC_normal_finalize_mark_proc);
      GC_set_mark_bit(obj);
      GC_complete_ongoing_collection();
    }
  }
}
409

410
STATIC void
411
GC_clear_togglerefs(void)
31,049✔
412
{
413
  size_t i;
414

415
  GC_ASSERT(I_HOLD_LOCK());
31,049✔
416
  for (i = 0; i < GC_toggleref_array_size; ++i) {
31,049✔
417
    GCToggleRef *r = &GC_toggleref_arr[i];
×
418

419
    if ((ADDR(r->strong_ref) & 1) != 0) {
×
420
      if (!GC_is_marked(GC_REVEAL_POINTER(r->weak_ref))) {
×
421
        r->weak_ref = 0;
×
422
      } else {
423
        /* No need to copy, this garbage collector is a non-moving one. */
424
      }
425
    }
426
  }
427
}
31,049✔
428

429
/* Install the client toggle-ref callback under the allocator lock. */
GC_API void GC_CALL
GC_set_toggleref_func(GC_toggleref_func fn)
{
  LOCK();
  GC_toggleref_callback = fn;
  UNLOCK();
}
436

437
GC_API GC_toggleref_func GC_CALL
438
GC_get_toggleref_func(void)
3✔
439
{
440
  GC_toggleref_func fn;
441

442
  READER_LOCK();
3✔
443
  fn = GC_toggleref_callback;
3✔
444
  READER_UNLOCK();
3✔
445
  return fn;
3✔
446
}
447

448
/*
 * Ensure `GC_toggleref_arr` can accommodate `capacity_inc` additional
 * entries, allocating or growing (by doubling) the array as needed.
 * Returns FALSE on allocation failure or capacity overflow.
 */
static GC_bool
ensure_toggleref_capacity(size_t capacity_inc)
{
  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == GC_toggleref_arr) {
    /* Set the initial capacity. */
    GC_toggleref_array_capacity = 32;

    GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
        GC_toggleref_array_capacity * sizeof(GCToggleRef), NORMAL);
    if (NULL == GC_toggleref_arr)
      return FALSE;
  }
  if (GC_toggleref_array_size + capacity_inc >= GC_toggleref_array_capacity) {
    GCToggleRef *new_array;
    /* Double the capacity until the requested total fits. */
    while (GC_toggleref_array_capacity
           < GC_toggleref_array_size + capacity_inc) {
      GC_toggleref_array_capacity *= 2;
      /* Reaching the top bit of `size_t` would wrap on the next double. */
      if ((GC_toggleref_array_capacity
           & ((size_t)1 << (sizeof(size_t) * 8 - 1)))
          != 0) {
        /* An overflow. */
        return FALSE;
      }
    }

    new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
        GC_toggleref_array_capacity * sizeof(GCToggleRef), NORMAL);
    if (UNLIKELY(NULL == new_array))
      return FALSE;
    if (LIKELY(GC_toggleref_array_size > 0))
      BCOPY(GC_toggleref_arr, new_array,
            GC_toggleref_array_size * sizeof(GCToggleRef));
    GC_INTERNAL_FREE(GC_toggleref_arr);
    GC_toggleref_arr = new_array;
  }
  return TRUE;
}
486

487
/*
 * Register `obj` as a toggle-ref, initially strong or weak depending
 * on `is_strong_ref`.  A no-op unless a toggle-ref callback has been
 * installed.  Returns `GC_SUCCESS` or `GC_NO_MEMORY`.
 */
GC_API int GC_CALL
GC_toggleref_add(void *obj, int is_strong_ref)
{
  int res = GC_SUCCESS;

  GC_ASSERT(NONNULL_ARG_NOT_NULL(obj));
  LOCK();
  GC_ASSERT((ADDR(obj) & 1) == 0 && obj == GC_base(obj));
  if (GC_toggleref_callback != 0) {
    if (!ensure_toggleref_capacity(1)) {
      res = GC_NO_MEMORY;
    } else {
      GCToggleRef *r = &GC_toggleref_arr[GC_toggleref_array_size];

      if (is_strong_ref) {
        r->strong_ref = obj;
        GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
      } else {
        /* The hidden pointer has its low bit set since `obj` is aligned. */
        r->weak_ref = GC_HIDE_POINTER(obj);
        GC_ASSERT((r->weak_ref & 1) != 0);
      }
      GC_toggleref_array_size++;
    }
  }
  UNLOCK();
  return res;
}
514
#  endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
515

516
/* Finalizer callback support. */

/* Client-supplied callback stored here; 0 means unset. */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;

/* Install the await-finalize callback under the allocator lock. */
GC_API void GC_CALL
GC_set_await_finalize_proc(GC_await_finalize_proc fn)
{
  LOCK();
  GC_object_finalized_proc = fn;
  UNLOCK();
}
527

528
GC_API GC_await_finalize_proc GC_CALL
529
GC_get_await_finalize_proc(void)
3✔
530
{
531
  GC_await_finalize_proc fn;
532

533
  READER_LOCK();
3✔
534
  fn = GC_object_finalized_proc;
3✔
535
  READER_UNLOCK();
3✔
536
  return fn;
3✔
537
}
538

539
#  ifndef GC_LONG_REFS_NOT_NEEDED
540
GC_API int GC_CALL
541
GC_register_long_link(void **link, const void *obj)
555,948✔
542
{
543
  if ((ADDR(link) & (ALIGNMENT - 1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
555,948✔
544
    ABORT("Bad arg to GC_register_long_link");
×
545
  return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
555,948✔
546
                                             "long dl");
547
}
548

549
GC_API int GC_CALL
550
GC_unregister_long_link(void **link)
277,974✔
551
{
552
  struct disappearing_link *curr_dl;
553

554
  if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
277,974✔
555
    /* Nothing to do. */
556
    return 0;
×
557
  }
558
  LOCK();
277,974✔
559
  curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
277,974✔
560
  UNLOCK();
277,974✔
561
  if (NULL == curr_dl)
277,974✔
562
    return 0;
×
563
  FREE_DL_ENTRY(curr_dl);
277,974✔
564
  return 1;
277,974✔
565
}
566
#  endif /* !GC_LONG_REFS_NOT_NEEDED */
567

568
#  ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
569
/*
 * Move the disappearing-link registration from location `link` to
 * `new_link` within the given table.  Returns `GC_SUCCESS`,
 * `GC_NOT_FOUND` (no entry registered for `link`), or `GC_DUPLICATE`
 * (`new_link` is already registered).
 */
STATIC int
GC_move_disappearing_link_inner(struct dl_hashtbl_s *dl_hashtbl, void **link,
                                void **new_link)
{
  struct disappearing_link *curr_dl, *new_dl;
  struct disappearing_link *prev_dl = NULL;
  size_t curr_index, new_index;
  GC_hidden_pointer curr_hidden_link, new_hidden_link;

#    ifdef GC_ASSERTIONS
  /* Just check accessibility of the target location. */
  GC_noop1_ptr(*new_link);
#    endif
  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == dl_hashtbl->head))
    return GC_NOT_FOUND;

  /* Find current link. */
  curr_index = HASH2(link, dl_hashtbl->log_size);
  curr_hidden_link = GC_HIDE_POINTER(link);
  for (curr_dl = dl_hashtbl->head[curr_index]; curr_dl;
       curr_dl = dl_next(curr_dl)) {
    if (curr_dl->dl_hidden_link == curr_hidden_link)
      break;
    prev_dl = curr_dl;
  }
  if (UNLIKELY(NULL == curr_dl)) {
    return GC_NOT_FOUND;
  } else if (link == new_link) {
    /* Nothing to do. */
    return GC_SUCCESS;
  }

  /* `link` is found; now check `new_link` is not present. */
  new_index = HASH2(new_link, dl_hashtbl->log_size);
  new_hidden_link = GC_HIDE_POINTER(new_link);
  for (new_dl = dl_hashtbl->head[new_index]; new_dl;
       new_dl = dl_next(new_dl)) {
    if (new_dl->dl_hidden_link == new_hidden_link) {
      /* Target already registered; bail out. */
      return GC_DUPLICATE;
    }
  }

  /* Remove from old, add to new, update `link`. */
  if (NULL == prev_dl) {
    dl_hashtbl->head[curr_index] = dl_next(curr_dl);
  } else {
    dl_set_next(prev_dl, dl_next(curr_dl));
    GC_dirty(prev_dl);
  }
  curr_dl->dl_hidden_link = new_hidden_link;
  dl_set_next(curr_dl, dl_hashtbl->head[new_index]);
  dl_hashtbl->head[new_index] = curr_dl;
  GC_dirty(curr_dl);
  GC_dirty(dl_hashtbl->head); /*< entire object */
  return GC_SUCCESS;
}
626

627
GC_API int GC_CALL
628
GC_move_disappearing_link(void **link, void **new_link)
833,922✔
629
{
630
  int result;
631

632
  if ((ADDR(new_link) & (ALIGNMENT - 1)) != 0
833,922✔
633
      || !NONNULL_ARG_NOT_NULL(new_link))
833,922✔
634
    ABORT("Bad new_link arg to GC_move_disappearing_link");
×
635
  if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
833,922✔
636
    /* Nothing to do. */
637
    return GC_NOT_FOUND;
×
638
  }
639
  LOCK();
833,922✔
640
  result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
833,922✔
641
  UNLOCK();
833,922✔
642
  return result;
833,922✔
643
}
644

645
#    ifndef GC_LONG_REFS_NOT_NEEDED
646
GC_API int GC_CALL
647
GC_move_long_link(void **link, void **new_link)
833,922✔
648
{
649
  int result;
650

651
  if ((ADDR(new_link) & (ALIGNMENT - 1)) != 0
833,922✔
652
      || !NONNULL_ARG_NOT_NULL(new_link))
833,922✔
653
    ABORT("Bad new_link arg to GC_move_long_link");
×
654
  if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
833,922✔
655
    /* Nothing to do. */
656
    return GC_NOT_FOUND;
×
657
  }
658
  LOCK();
833,922✔
659
  result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
833,922✔
660
  UNLOCK();
833,922✔
661
  return result;
833,922✔
662
}
663
#    endif
664
#  endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
665

666
/*
667
 * Various finalization marker procedures.  Note that mark stack overflow
668
 * is handled by the caller, and is not a disaster.
669
 */
670

671
#  if defined(_MSC_VER) && defined(I386)
GC_ATTR_NOINLINE
/* Otherwise some optimizer bug is tickled in VC for x86 (v19, at least). */
#  endif
/* Push the object (per its header) onto the mark stack for marking. */
STATIC void
GC_normal_finalize_mark_proc(ptr_t p)
{
  GC_mark_stack_top = GC_ms_push_obj_hdr(p, HDR(p), GC_mark_stack_top,
                                         GC_mark_stack + GC_mark_stack_size);
}
681

682
/*
 * This only pays very partial attention to the mark descriptor.
 * It does the right thing for normal and atomic objects, and treats
 * most others as normal.
 */
STATIC void
GC_ignore_self_finalize_mark_proc(ptr_t p)
{
  const hdr *hhdr = HDR(p);
  word descr = hhdr->hb_descr;
  ptr_t current_p;
  ptr_t scan_limit;
  ptr_t target_limit = p + hhdr->hb_sz - 1;

  if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
    /* A simple-length descriptor: scan only the first `descr` bytes. */
    scan_limit = p + descr - sizeof(ptr_t);
  } else {
    /* Otherwise scan the whole object. */
    scan_limit = target_limit + 1 - sizeof(ptr_t);
  }
  for (current_p = p; ADDR_GE(scan_limit, current_p); current_p += ALIGNMENT) {
    ptr_t q;

    LOAD_PTR_OR_CONTINUE(q, current_p);
    /* Push only pointers that point outside of `p` itself. */
    if (ADDR_LT(q, p) || ADDR_LT(target_limit, q)) {
      GC_PUSH_ONE_HEAP(q, current_p, GC_mark_stack_top);
    }
  }
}
710

711
/* Mark procedure that marks nothing (no finalization ordering). */
STATIC void
GC_null_finalize_mark_proc(ptr_t p)
{
  UNUSED_ARG(p);
}
716

717
/*
 * `GC_unreachable_finalize_mark_proc` is an alias for normal marking,
 * but it is explicitly tested for, and triggers different behavior.
 * Objects registered in this way are not finalized if they are reachable
 * by other finalizable objects, even if those other objects specify
 * no ordering.
 */
STATIC void
GC_unreachable_finalize_mark_proc(ptr_t p)
{
  /*
   * A dummy comparison to ensure the compiler not to optimize two
   * identical functions into a single one (thus, to ensure a unique
   * address of each).  Alternatively, `GC_noop1_ptr(p)` could be used.
   */
  if (UNLIKELY(NULL == p))
    return;

  GC_normal_finalize_mark_proc(p);
}
737

738
/*
739
 * Register a finalization function.  See `gc.h` file for details.
740
 * The last parameter is a procedure that determines marking for
741
 * finalization ordering.  Any objects marked by that procedure will be
742
 * guaranteed to not have been finalized when this finalizer is invoked.
743
 */
744
STATIC void
745
GC_register_finalizer_inner(void *obj, GC_finalization_proc fn, void *cd,
4,920,327✔
746
                            GC_finalization_proc *ofn, void **ocd,
747
                            finalization_mark_proc mp)
748
{
749
  struct finalizable_object *curr_fo;
750
  size_t index;
751
  struct finalizable_object *new_fo = NULL;
4,920,327✔
752
  const hdr *hhdr = NULL; /*< initialized to prevent warning */
4,920,327✔
753

754
  GC_ASSERT(GC_is_initialized);
4,920,327✔
755
  if (UNLIKELY(GC_find_leak_inner)) {
4,920,327✔
756
    /* No-op.  `*ocd` and `*ofn` remain unchanged. */
757
    return;
×
758
  }
759
  LOCK();
4,920,327✔
760
  GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
4,920,327✔
761
  if (mp == GC_unreachable_finalize_mark_proc)
4,920,327✔
762
    GC_need_unreachable_finalization = TRUE;
63✔
763
  if (UNLIKELY(NULL == GC_fnlz_roots.fo_head)
4,920,327✔
764
      || UNLIKELY(GC_fo_entries > ((size_t)1 << GC_log_fo_table_size))) {
4,920,318✔
765
    GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
366✔
766
                  &GC_log_fo_table_size, &GC_fo_entries);
767
    GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
366✔
768
                       1U << GC_log_fo_table_size);
×
769
  }
770
  for (;;) {
×
771
    struct finalizable_object *prev_fo = NULL;
4,920,327✔
772
    GC_oom_func oom_fn;
773

774
    index = HASH2(obj, GC_log_fo_table_size);
4,920,327✔
775
    curr_fo = GC_fnlz_roots.fo_head[index];
4,920,327✔
776
    while (curr_fo != NULL) {
6,942,510✔
777
      GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
2,051,583✔
778
      if (curr_fo->fo_hidden_base == GC_HIDE_POINTER(obj)) {
2,051,583✔
779
        /*
780
         * Interruption by a signal in the middle of this should be safe.
781
         * The client may see only `*ocd` updated, but we will declare that
782
         * to be his problem.
783
         */
784
        if (ocd)
29,400✔
785
          *ocd = curr_fo->fo_client_data;
14,000✔
786
        if (ofn)
29,400✔
787
          *ofn = curr_fo->fo_fn;
14,000✔
788
        /* Delete the structure for `obj`. */
789
        if (NULL == prev_fo) {
29,400✔
790
          GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
29,398✔
791
        } else {
792
          fo_set_next(prev_fo, fo_next(curr_fo));
2✔
793
          GC_dirty(prev_fo);
2✔
794
        }
795
        if (0 == fn) {
29,400✔
796
          GC_fo_entries--;
1,400✔
797
          /*
798
           * May not happen if we get a signal.  But a high estimate will
799
           * only make the table larger than necessary.
800
           */
801
#  if !defined(THREADS) && !defined(DBG_HDRS_ALL)
802
          GC_free(curr_fo);
803
#  endif
804
        } else {
805
          curr_fo->fo_fn = fn;
28,000✔
806
          curr_fo->fo_client_data = (ptr_t)cd;
28,000✔
807
          curr_fo->fo_mark_proc = mp;
28,000✔
808
          GC_dirty(curr_fo);
28,000✔
809
          /*
810
           * Reinsert it.  We deleted it first to maintain consistency in
811
           * the event of a signal.
812
           */
813
          if (NULL == prev_fo) {
28,000✔
814
            GC_fnlz_roots.fo_head[index] = curr_fo;
28,000✔
815
          } else {
UNCOV
816
            fo_set_next(prev_fo, curr_fo);
×
UNCOV
817
            GC_dirty(prev_fo);
×
818
          }
819
        }
820
        if (NULL == prev_fo)
29,400✔
821
          GC_dirty(GC_fnlz_roots.fo_head + index);
29,398✔
822
        UNLOCK();
29,400✔
823
#  ifndef DBG_HDRS_ALL
824
        /* Free unused `new_fo` returned by `GC_oom_fn()`. */
825
        GC_free(new_fo);
29,400✔
826
#  endif
827
        return;
29,400✔
828
      }
829
      prev_fo = curr_fo;
2,022,183✔
830
      curr_fo = fo_next(curr_fo);
2,022,183✔
831
    }
832
    if (UNLIKELY(new_fo != NULL)) {
4,890,927✔
833
      /* `new_fo` is returned by `GC_oom_fn()`. */
834
      GC_ASSERT(fn != 0);
×
835
#  ifdef LINT2
836
      if (NULL == hhdr)
837
        ABORT("Bad hhdr in GC_register_finalizer_inner");
838
#  endif
839
      break;
×
840
    }
841
    if (0 == fn) {
4,890,927✔
842
      if (ocd)
208,582✔
843
        *ocd = NULL;
×
844
      if (ofn)
208,582✔
845
        *ofn = 0;
×
846
      UNLOCK();
208,582✔
847
      return;
208,582✔
848
    }
849
    GET_HDR(obj, hhdr);
4,682,345✔
850
    if (UNLIKELY(NULL == hhdr)) {
4,682,345✔
851
      /* We will not collect it, hence finalizer would not be run. */
852
      if (ocd)
×
853
        *ocd = NULL;
×
854
      if (ofn)
×
855
        *ofn = 0;
×
856
      UNLOCK();
×
857
      return;
×
858
    }
859
    new_fo = (struct finalizable_object *)GC_INTERNAL_MALLOC(
4,682,345✔
860
        sizeof(struct finalizable_object), NORMAL);
861
    if (LIKELY(new_fo != NULL))
4,682,345✔
862
      break;
4,682,345✔
863
    oom_fn = GC_oom_fn;
×
864
    UNLOCK();
×
865
    new_fo = (struct finalizable_object *)(*oom_fn)(
×
866
        sizeof(struct finalizable_object));
867
    if (NULL == new_fo) {
×
868
      /* No enough memory.  `*ocd` and `*ofn` remain unchanged. */
869
      return;
×
870
    }
871
    /* It is not likely we will make it here, but... */
872
    LOCK();
×
873
    /*
874
     * Recalculate index since the table may grow and check again that
875
     * our finalizer is not in the table.
876
     */
877
  }
878
  GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
4,682,345✔
879
  if (ocd)
4,682,345✔
880
    *ocd = NULL;
196,063✔
881
  if (ofn)
4,682,345✔
882
    *ofn = 0;
196,063✔
883
  new_fo->fo_hidden_base = GC_HIDE_POINTER(obj);
4,682,345✔
884
  new_fo->fo_fn = fn;
4,682,345✔
885
  new_fo->fo_client_data = (ptr_t)cd;
4,682,345✔
886
  new_fo->fo_object_sz = hhdr->hb_sz;
4,682,345✔
887
  new_fo->fo_mark_proc = mp;
4,682,345✔
888
  fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
4,682,345✔
889
  GC_dirty(new_fo);
4,682,345✔
890
  GC_fo_entries++;
4,682,345✔
891
  GC_fnlz_roots.fo_head[index] = new_fo;
4,682,345✔
892
  GC_dirty(GC_fnlz_roots.fo_head + index);
4,682,345✔
893
  UNLOCK();
4,682,345✔
894
}
895

896
/*
 * Register `fn` to be invoked with argument `cd` when `obj` becomes
 * unreachable.  Uses the default (normal) finalization ordering: all
 * objects reachable from `obj` are protected until `obj` is finalized.
 * Previous finalizer/data, if any, are returned via `ofn`/`ocd`.
 */
GC_API void GC_CALL
GC_register_finalizer(void *obj, GC_finalization_proc fn, void *cd,
                      GC_finalization_proc *ofn, void **ocd)
{
  /* Delegate to the common routine with the standard mark procedure. */
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_normal_finalize_mark_proc);
}
277,978✔
903

904
/*
 * Like `GC_register_finalizer`, but pointers from `obj` to itself are
 * ignored for ordering purposes (the "ignore-self" mark procedure),
 * so a self-referential object can still be finalized.
 */
GC_API void GC_CALL
GC_register_finalizer_ignore_self(void *obj, GC_finalization_proc fn, void *cd,
                                  GC_finalization_proc *ofn, void **ocd)
{
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_ignore_self_finalize_mark_proc);
}
447,982✔
911

912
/*
 * Like `GC_register_finalizer`, but with no finalization ordering:
 * the null mark procedure is used, so objects reachable from `obj`
 * are not kept alive on its behalf.
 */
GC_API void GC_CALL
GC_register_finalizer_no_order(void *obj, GC_finalization_proc fn, void *cd,
                               GC_finalization_proc *ofn, void **ocd)
{
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_null_finalize_mark_proc);
}
4,194,304✔
919

920
/*
 * Like `GC_register_finalizer`, but the finalizer runs only when `obj`
 * is truly unreachable, even from other finalizable objects (Java-style
 * "finalize when unreachable").  Requires Java finalization mode.
 */
GC_API void GC_CALL
GC_register_finalizer_unreachable(void *obj, GC_finalization_proc fn, void *cd,
                                  GC_finalization_proc *ofn, void **ocd)
{
  GC_ASSERT(GC_java_finalization);
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_unreachable_finalize_mark_proc);
}
63✔
928

929
#  ifndef NO_DEBUGGING
930
STATIC void
931
GC_dump_finalization_links(const struct dl_hashtbl_s *dl_hashtbl)
6✔
932
{
933
  size_t dl_size = (size_t)1 << dl_hashtbl->log_size;
6✔
934
  size_t i;
935

936
  if (NULL == dl_hashtbl->head) {
6✔
937
    /* The table is empty. */
938
    return;
×
939
  }
940

941
  for (i = 0; i < dl_size; i++) {
98,310✔
942
    struct disappearing_link *curr_dl;
943

944
    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL;
98,307✔
945
         curr_dl = dl_next(curr_dl)) {
3✔
946
      ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj);
3✔
947
      ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link);
3✔
948

949
      GC_printf("Object: %p, link value: %p, link addr: %p\n",
3✔
950
                (void *)real_ptr, *(void **)real_link, (void *)real_link);
951
    }
952
  }
953
}
954

955
GC_API void GC_CALL
956
GC_dump_finalization(void)
3✔
957
{
958
  struct finalizable_object *curr_fo;
959
  size_t i;
960
  size_t fo_size
3✔
961
      = GC_fnlz_roots.fo_head == NULL ? 0 : (size_t)1 << GC_log_fo_table_size;
3✔
962

963
  GC_printf("\n***Disappearing (short) links:\n");
3✔
964
  GC_dump_finalization_links(&GC_dl_hashtbl);
3✔
965
#    ifndef GC_LONG_REFS_NOT_NEEDED
966
  GC_printf("\n***Disappearing long links:\n");
3✔
967
  GC_dump_finalization_links(&GC_ll_hashtbl);
3✔
968
#    endif
969
  GC_printf("\n***Finalizers:\n");
3✔
970
  for (i = 0; i < fo_size; i++) {
49,155✔
971
    for (curr_fo = GC_fnlz_roots.fo_head[i]; curr_fo != NULL;
49,155✔
972
         curr_fo = fo_next(curr_fo)) {
3✔
973
      ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
3✔
974

975
      GC_printf("Finalizable object: %p\n", (void *)real_ptr);
3✔
976
    }
977
  }
978
}
3✔
979
#  endif /* !NO_DEBUGGING */
980

981
#  ifndef THREADS
982
/*
983
 * Checks and updates the level of finalizers recursion.
984
 * Returns `NULL` if `GC_invoke_finalizers()` should not be called by
985
 * the collector (to minimize the risk of a deep finalizers recursion),
986
 * otherwise returns a pointer to `GC_finalizer_nested`.
987
 */
988
STATIC unsigned char *
GC_check_finalizer_nested(void)
{
  /* Current recursion depth; nonzero means a `GC_invoke_finalizers()`
     call is already active further up the stack. */
  unsigned nesting_level = GC_finalizer_nested;
  if (nesting_level) {
    /*
     * We are inside another `GC_invoke_finalizers()`.  Skip some
     * implicitly-called `GC_invoke_finalizers()` depending on the
     * nesting (recursion) level.
     */
    if ((unsigned)(++GC_finalizer_skipped) < (1U << nesting_level))
      return NULL;
    /* Enough calls skipped at this depth (2^level); let one through. */
    GC_finalizer_skipped = 0;
  }
  /* Record the new depth and hand the caller a pointer so it can
     reset the counter when it is done invoking finalizers. */
  GC_finalizer_nested = (unsigned char)(nesting_level + 1);
  return &GC_finalizer_nested;
}
1005
#  endif /* !THREADS */
1006

1007
/*
 * Walk the given disappearing-link hash table.  When `is_remove_dangling`
 * is false, clear (set to `NULL`) each registered link whose hidden
 * object is no longer marked, and drop that entry.  When true, drop
 * entries whose link location itself lies inside an unmarked (dangling)
 * object, without touching the location.  The allocator lock must be
 * held; `GC_dirty` barriers are issued for every mutated node.
 */
GC_INLINE void
GC_make_disappearing_links_disappear(struct dl_hashtbl_s *dl_hashtbl,
                                     GC_bool is_remove_dangling)
{
  size_t i;
  size_t dl_size = (size_t)1 << dl_hashtbl->log_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == dl_hashtbl->head) {
    /* The table is empty. */
    return;
  }

  for (i = 0; i < dl_size; i++) {
    struct disappearing_link *curr_dl, *next_dl;
    struct disappearing_link *prev_dl = NULL;

    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
      /* Fetch the successor first: `curr_dl` may be unlinked below. */
      next_dl = dl_next(curr_dl);
#  if defined(GC_ASSERTIONS) && !defined(THREAD_SANITIZER)
      /* Check accessibility of the location pointed by the link. */
      GC_noop1_ptr(*(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link));
#  endif
      if (is_remove_dangling) {
        /* Base of the object containing the link location; `NULL` if
           the location is not inside a heap object. */
        ptr_t real_link
            = (ptr_t)GC_base(GC_REVEAL_POINTER(curr_dl->dl_hidden_link));

        if (NULL == real_link || LIKELY(GC_is_marked(real_link))) {
          /* Link location still valid; keep the entry. */
          prev_dl = curr_dl;
          continue;
        }
      } else {
        if (LIKELY(GC_is_marked(
                (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj)))) {
          /* Target object survives this collection; keep the entry. */
          prev_dl = curr_dl;
          continue;
        }
        /* Target is unreachable: make the link "disappear". */
        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
      }

      /* Delete `curr_dl` entry from `dl_hashtbl`. */
      if (NULL == prev_dl) {
        dl_hashtbl->head[i] = next_dl;
        needs_barrier = TRUE;
      } else {
        dl_set_next(prev_dl, next_dl);
        GC_dirty(prev_dl);
      }
      GC_clear_mark_bit(curr_dl);
      dl_hashtbl->entries--;
    }
  }
  if (needs_barrier)
    GC_dirty(dl_hashtbl->head); /*< entire object */
}
1063

1064
/*
 * Finalization pass performed after marking, with the allocator lock
 * held: clear short disappearing links to unmarked objects, mark
 * everything reachable from finalizable objects, move still-unmarked
 * finalizable objects to the `finalize_now` queue (updating
 * `GC_bytes_finalized`), apply the Java-style ordering and
 * finalize-when-unreachable revival rules, then purge dangling links.
 */
GC_INNER void
GC_finalize(void)
{
  struct finalizable_object *curr_fo, *prev_fo, *next_fo;
  ptr_t real_ptr;
  size_t i;
  size_t fo_size
      = GC_fnlz_roots.fo_head == NULL ? 0 : (size_t)1 << GC_log_fo_table_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
#  ifndef SMALL_CONFIG
  /* Save current `GC_dl_entries` value for stats printing. */
  GC_old_dl_entries = GC_dl_hashtbl.entries;
#    ifndef GC_LONG_REFS_NOT_NEEDED
  /* Save current `GC_ll_entries` value for stats printing. */
  GC_old_ll_entries = GC_ll_hashtbl.entries;
#    endif
#  endif

#  ifndef GC_TOGGLE_REFS_NOT_NEEDED
  GC_mark_togglerefs();
#  endif
  /* First pass: null out short links to unreachable objects. */
  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);

  /*
   * Mark all objects reachable via chains of 1 or more pointers from
   * finalizable objects.
   */
  GC_ASSERT(!GC_collection_in_progress());
  for (i = 0; i < fo_size; i++) {
    for (curr_fo = GC_fnlz_roots.fo_head[i]; curr_fo != NULL;
         curr_fo = fo_next(curr_fo)) {
      GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
      real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
      if (!GC_is_marked(real_ptr)) {
        GC_MARKED_FOR_FINALIZATION(real_ptr);
        GC_mark_fo(real_ptr, curr_fo->fo_mark_proc);
        if (GC_is_marked(real_ptr)) {
          /* Marking from the object reached the object itself. */
          WARN("Finalization cycle involving %p\n", real_ptr);
        }
      }
    }
  }
  /* Enqueue for finalization all objects that are still unreachable. */
  GC_bytes_finalized = 0;
  for (i = 0; i < fo_size; i++) {
    curr_fo = GC_fnlz_roots.fo_head[i];
    prev_fo = NULL;
    while (curr_fo != NULL) {
      real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
      if (!GC_is_marked(real_ptr)) {
        if (!GC_java_finalization) {
          /* Keep the object alive until its finalizer has run. */
          GC_set_mark_bit(real_ptr);
        }
        /* Delete from hash table. */
        next_fo = fo_next(curr_fo);
        if (NULL == prev_fo) {
          GC_fnlz_roots.fo_head[i] = next_fo;
          if (GC_object_finalized_proc) {
            GC_dirty(GC_fnlz_roots.fo_head + i);
          } else {
            needs_barrier = TRUE;
          }
        } else {
          fo_set_next(prev_fo, next_fo);
          GC_dirty(prev_fo);
        }
        GC_fo_entries--;
        if (GC_object_finalized_proc)
          GC_object_finalized_proc(real_ptr);

        /* Add to list of objects awaiting finalization. */
        fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
        GC_dirty(curr_fo);
        SET_FINALIZE_NOW(curr_fo);
        /* Unhide object pointer so any future collections will see it. */
        curr_fo->fo_hidden_base
            = (GC_hidden_pointer)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

        GC_bytes_finalized
            += (word)curr_fo->fo_object_sz + sizeof(struct finalizable_object);
        GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
        curr_fo = next_fo;
      } else {
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
    }
  }

  if (GC_java_finalization) {
    /*
     * Make sure we mark everything reachable from objects finalized
     * using the no-order `fo_mark_proc`.
     */
    for (curr_fo = GC_fnlz_roots.finalize_now; curr_fo != NULL;
         curr_fo = fo_next(curr_fo)) {
      real_ptr = (ptr_t)curr_fo->fo_hidden_base; /*< revealed */
      if (!GC_is_marked(real_ptr)) {
        if (curr_fo->fo_mark_proc == GC_null_finalize_mark_proc) {
          GC_mark_fo(real_ptr, GC_normal_finalize_mark_proc);
        }
        if (curr_fo->fo_mark_proc != GC_unreachable_finalize_mark_proc) {
          GC_set_mark_bit(real_ptr);
        }
      }
    }

    /*
     * Now revive finalize-when-unreachable objects reachable from other
     * finalizable objects.
     */
    if (GC_need_unreachable_finalization) {
      curr_fo = GC_fnlz_roots.finalize_now;
#  if defined(GC_ASSERTIONS) || defined(LINT2)
      if (curr_fo != NULL && NULL == GC_fnlz_roots.fo_head)
        ABORT("GC_fnlz_roots.fo_head is null");
#  endif
      for (prev_fo = NULL; curr_fo != NULL;
           prev_fo = curr_fo, curr_fo = next_fo) {
        next_fo = fo_next(curr_fo);
        if (curr_fo->fo_mark_proc != GC_unreachable_finalize_mark_proc)
          continue;

        real_ptr = (ptr_t)curr_fo->fo_hidden_base; /*< revealed */
        if (!GC_is_marked(real_ptr)) {
          /* Truly unreachable: keep it queued, just mark it alive. */
          GC_set_mark_bit(real_ptr);
          continue;
        }
        /* Reachable again: pull it off the `finalize_now` queue... */
        if (NULL == prev_fo) {
          SET_FINALIZE_NOW(next_fo);
        } else {
          fo_set_next(prev_fo, next_fo);
          GC_dirty(prev_fo);
        }
        curr_fo->fo_hidden_base = GC_HIDE_POINTER(real_ptr);
        GC_bytes_finalized
            -= (word)curr_fo->fo_object_sz + sizeof(struct finalizable_object);

        /* ... and re-insert it into the registration hash table. */
        i = HASH2(real_ptr, GC_log_fo_table_size);
        fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
        GC_dirty(curr_fo);
        GC_fo_entries++;
        GC_fnlz_roots.fo_head[i] = curr_fo;
        /* Step back so the loop advance revisits from `prev_fo`. */
        curr_fo = prev_fo;
        needs_barrier = TRUE;
      }
    }
  }
  if (needs_barrier)
    GC_dirty(GC_fnlz_roots.fo_head); /*< entire object */

  /* Remove dangling disappearing links. */
  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);

#  ifndef GC_TOGGLE_REFS_NOT_NEEDED
  GC_clear_togglerefs();
#  endif
#  ifndef GC_LONG_REFS_NOT_NEEDED
  GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
  GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
#  endif

  if (GC_alloc_fail_count > 0) {
    /*
     * Do not prevent running finalizers if there has been an allocation
     * failure recently.
     */
#  ifdef THREADS
    GC_reset_finalizer_nested();
#  else
    GC_finalizer_nested = 0;
#  endif
  }
}
31,049✔
1240

1241
/*
 * Count of finalizers to run, at most, during a single invocation
 * of `GC_invoke_finalizers()`; zero means no limit.  Accessed with the
 * allocator lock held.  Set via `GC_set_interrupt_finalizers()`.
 */
STATIC unsigned GC_interrupt_finalizers = 0;
1247

1248
#  ifndef JAVA_FINALIZATION_NOT_NEEDED
1249

1250
/*
1251
 * Enqueue all remaining finalizers to be run.  A collection in progress,
1252
 * if any, is completed when the first finalizer is enqueued.
1253
 */
1254
STATIC void
1255
GC_enqueue_all_finalizers(void)
×
1256
{
1257
  size_t i;
1258
  size_t fo_size
×
1259
      = GC_fnlz_roots.fo_head == NULL ? 0 : (size_t)1 << GC_log_fo_table_size;
×
1260

1261
  GC_ASSERT(I_HOLD_LOCK());
×
1262
  GC_bytes_finalized = 0;
×
1263
  for (i = 0; i < fo_size; i++) {
×
1264
    struct finalizable_object *curr_fo = GC_fnlz_roots.fo_head[i];
×
1265

1266
    GC_fnlz_roots.fo_head[i] = NULL;
×
1267
    while (curr_fo != NULL) {
×
1268
      struct finalizable_object *next_fo;
1269
      ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
×
1270

1271
      GC_mark_fo(real_ptr, GC_normal_finalize_mark_proc);
×
1272
      GC_set_mark_bit(real_ptr);
×
1273
      GC_complete_ongoing_collection();
×
1274
      next_fo = fo_next(curr_fo);
×
1275

1276
      /* Add to list of objects awaiting finalization. */
1277
      fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
×
1278
      GC_dirty(curr_fo);
×
1279
      SET_FINALIZE_NOW(curr_fo);
×
1280

1281
      /* Unhide object pointer so any future collections will see it. */
1282
      curr_fo->fo_hidden_base
1283
          = (GC_hidden_pointer)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
×
1284
      GC_bytes_finalized
×
1285
          += curr_fo->fo_object_sz + sizeof(struct finalizable_object);
×
1286
      curr_fo = next_fo;
×
1287
    }
1288
  }
1289
  /* All entries are deleted from the hash table. */
1290
  GC_fo_entries = 0;
×
1291
}
×
1292

1293
static int invoke_finalizers_internal(GC_bool all);
1294

1295
/*
 * Repeatedly enqueue and run every registered finalizer until none
 * remain.  The loop re-checks `GC_fo_entries` under the allocator lock
 * each iteration because running finalizers may register new ones.
 */
GC_API void GC_CALL
GC_finalize_all(void)
{
  for (;;) {
    LOCK();
    if (0 == GC_fo_entries) {
      UNLOCK();
      break;
    }
    GC_enqueue_all_finalizers();
    UNLOCK();

    /*
     * Running the finalizers in this thread is arguably not a good idea
     * when we should be notifying another thread to run them.
     * But otherwise we do not have a great way to wait for them to run.
     */
    (void)invoke_finalizers_internal(TRUE);
  }
}
3✔
1315

1316
#  endif /* !JAVA_FINALIZATION_NOT_NEEDED */
1317

1318
/*
 * Set the limit on the number of finalizers executed per
 * `GC_invoke_finalizers()` call (0 means unlimited); the value is
 * stored under the allocator lock.
 */
GC_API void GC_CALL
GC_set_interrupt_finalizers(unsigned value)
{
  LOCK();
  GC_interrupt_finalizers = value;
  UNLOCK();
}
3✔
1325

1326
/*
 * Return the current per-invocation finalizer limit; read under the
 * reader lock to pair with the writer in `GC_set_interrupt_finalizers`.
 */
GC_API unsigned GC_CALL
GC_get_interrupt_finalizers(void)
{
  unsigned value;

  READER_LOCK();
  value = GC_interrupt_finalizers;
  READER_UNLOCK();
  return value;
}
1336

1337
/*
 * Return nonzero if the `finalize_now` queue is non-empty.  Safe to
 * call without the allocator lock: the pointer is read atomically
 * when an atomic load primitive is available.
 */
GC_API int GC_CALL
GC_should_invoke_finalizers(void)
{
#  ifdef AO_HAVE_load
  return GC_cptr_load((volatile ptr_t *)&GC_fnlz_roots.finalize_now) != NULL;
#  else
  return GC_fnlz_roots.finalize_now != NULL;
#  endif /* !THREADS */
}
1346

1347
/*
 * Public entry point: run queued finalizers (subject to the
 * `GC_interrupt_finalizers` limit) and return how many were invoked.
 * Must be called without the allocator lock held.
 */
GC_API int GC_CALL
GC_invoke_finalizers(void)
{
  GC_ASSERT(I_DONT_HOLD_LOCK());
  return invoke_finalizers_internal(FALSE);
}
1353

1354
/*
 * Dequeue and invoke finalizers from `finalize_now`.  Each finalizer
 * callback runs with the allocator lock released.  If `all` is false,
 * stop after `GC_interrupt_finalizers` invocations (when that limit is
 * nonzero).  Memory freed by the finalizers themselves is credited to
 * `GC_finalizer_bytes_freed`.  Returns the number of finalizers run.
 */
static int
invoke_finalizers_internal(GC_bool all)
{
  int count = 0;
  word bytes_freed_before = 0; /*< initialized to prevent warning */

  while (GC_should_invoke_finalizers()) {
    struct finalizable_object *curr_fo;
    ptr_t real_ptr;

    LOCK();
    if (0 == count) {
      /* Note: we hold the allocator lock here. */
      bytes_freed_before = GC_bytes_freed;
    } else if (UNLIKELY(GC_interrupt_finalizers != 0) && !all
               && (unsigned)count >= GC_interrupt_finalizers) {
      /* Per-invocation limit reached; leave the rest queued. */
      UNLOCK();
      break;
    }
    curr_fo = GC_fnlz_roots.finalize_now;
#  ifdef THREADS
    /* Another thread may have drained the queue since the unlocked
       check above. */
    if (UNLIKELY(NULL == curr_fo)) {
      UNLOCK();
      break;
    }
#  endif
    SET_FINALIZE_NOW(fo_next(curr_fo));
    UNLOCK();
    fo_set_next(curr_fo, NULL);
    real_ptr = (ptr_t)curr_fo->fo_hidden_base; /*< revealed */
    /* Run the client finalizer without holding the allocator lock. */
    curr_fo->fo_fn(real_ptr, curr_fo->fo_client_data);
    curr_fo->fo_client_data = NULL;
    ++count;
    /*
     * Explicit freeing of `curr_fo` is probably a bad idea.
     * It throws off accounting if nearly all objects are finalizable.
     * Otherwise it should not matter.
     */
  }
  /* `bytes_freed_before` is initialized whenever `count` is nonzero. */
  if (count != 0
#  if defined(THREADS) && !defined(THREAD_SANITIZER)
      /*
       * A quick check whether some memory was freed.  The race with
       * `GC_free()` is safe to be ignored because we only need to know
       * if the current thread has deallocated something.
       */
      && bytes_freed_before != GC_bytes_freed
#  endif
  ) {
    LOCK();
    GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
    UNLOCK();
  }
  return count;
}
1410

1411
/*
 * Called by the collector when finalizers may be pending.  In
 * on-demand mode, invoke the registered `GC_finalizer_notifier` at
 * most once per collection; otherwise run the finalizers directly,
 * using `GC_check_finalizer_nested()` to bound recursive invocation.
 * Also a hook for backtrace generation when so configured.
 */
GC_INNER void
GC_notify_or_invoke_finalizers(void)
{
  GC_finalizer_notifier_proc notifier_fn = 0;

#  if defined(THREADS) && !defined(KEEP_BACK_PTRS) && !defined(MAKE_BACK_GRAPH)
  /* Quick check (while unlocked) for an empty finalization queue. */
  if (!GC_should_invoke_finalizers())
    return;
#  endif
  LOCK();

#  if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
  /*
   * This is a convenient place to generate backtraces if appropriate,
   * since that code is not callable with the allocator lock.
   */
  if (GC_gc_no != GC_last_back_trace_gc_no
      && LIKELY(GC_gc_no > 1) /*< skip initial collection */) {
#    ifdef KEEP_BACK_PTRS
    static GC_bool bt_in_progress = FALSE;

    if (!bt_in_progress) {
      long i;

      /* Prevent a recursion or parallel usage. */
      bt_in_progress = TRUE;
      for (i = 0; i < GC_backtraces; ++i) {
        /*
         * FIXME: This tolerates concurrent heap mutation, which may
         * cause occasional mysterious results.  We need to release
         * the allocator lock, since `GC_print_callers()` acquires it.
         * It probably should not.
         */
        void *current = GC_generate_random_valid_address();

        UNLOCK();
        GC_printf("\n***Chosen address %p in object\n", current);
        GC_print_backtrace(current);
        LOCK();
      }
      bt_in_progress = FALSE;
    }
#    endif
    GC_last_back_trace_gc_no = GC_gc_no;
#    ifdef MAKE_BACK_GRAPH
    if (GC_print_back_height) {
      GC_print_back_graph_stats();
    }
#    endif
  }
#  endif
  if (NULL == GC_fnlz_roots.finalize_now) {
    /* Nothing queued after all; done. */
    UNLOCK();
    return;
  }

  if (!GC_finalize_on_demand) {
    unsigned char *pnested;

#  ifdef THREADS
    if (UNLIKELY(GC_in_thread_creation)) {
      /* Unsafe to run client code in the middle of thread creation. */
      UNLOCK();
      return;
    }
#  endif
    pnested = GC_check_finalizer_nested();
    UNLOCK();
    /* Skip `GC_invoke_finalizers()` if nested. */
    if (pnested != NULL) {
      (void)invoke_finalizers_internal(FALSE);
      /* Reset since no more finalizers or interrupted. */
      *pnested = 0;
#  ifndef THREADS
      GC_ASSERT(NULL == GC_fnlz_roots.finalize_now
                || GC_interrupt_finalizers > 0);
#  else
      /*
       * Note: in the multi-threaded case GC can run concurrently and
       * add more finalizers to run.
       */
#  endif
    }
    return;
  }

  /* These variables require synchronization to avoid data race. */
  if (GC_last_finalizer_notification != GC_gc_no) {
    notifier_fn = GC_finalizer_notifier;
    GC_last_finalizer_notification = GC_gc_no;
  }
  UNLOCK();
  if (notifier_fn != 0) {
    /* Invoke the notifier. */
    (*notifier_fn)();
  }
}
1508

1509
#  ifndef SMALL_CONFIG
1510
#    ifndef GC_LONG_REFS_NOT_NEEDED
1511
#      define IF_LONG_REFS_PRESENT_ELSE(x, y) (x)
1512
#    else
1513
#      define IF_LONG_REFS_PRESENT_ELSE(x, y) (y)
1514
#    endif
1515

1516
/*
 * Log finalization statistics: registered entries, live short/long
 * disappearing links, the count of finalization-ready objects, and
 * how many links were cleared since the pre-collection snapshot
 * (`GC_old_dl_entries` / `GC_old_ll_entries`).
 */
GC_INNER void
GC_print_finalization_stats(void)
{
  const struct finalizable_object *fo;
  unsigned long ready = 0;

  GC_log_printf(
      "%lu finalization entries;"
      " %lu/%lu short/long disappearing links alive\n",
      (unsigned long)GC_fo_entries, (unsigned long)GC_dl_hashtbl.entries,
      (unsigned long)IF_LONG_REFS_PRESENT_ELSE(GC_ll_hashtbl.entries, 0));

  /* Count the objects currently awaiting finalization. */
  for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
    ++ready;
  GC_log_printf(
      "%lu finalization-ready objects; %ld/%ld short/long links cleared\n",
      ready, (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
      (long)IF_LONG_REFS_PRESENT_ELSE(
          GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
}
×
1536
#  endif /* !SMALL_CONFIG */
1537

1538
#endif /* !GC_NO_FINALIZATION */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc