• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ivmai / bdwgc / 1484

12 Apr 2023 08:54AM UTC coverage: 76.502% (+0.07%) from 76.429%
1484

push

travis-ci-com

ivmai
Do not add extra byte to large ignore-off-page objects

For ignore-off-page objects the client should guarantee a pointer
within the first heap block of the object, thus there is no need to add
an extra byte for such objects if the object size is at least one heap
block.

* allchblk.c (setup_header): Add assertion that byte_sz is not less
than ALIGNMENT.
* allchblk.c [ALIGNMENT>GC_DS_TAGS] (setup_header): Modify descr local
variable to make it zero if IGNORE_OFF_PAGE flag is set and kind is
NORMAL (and object size is not less than HBLKSIZE); add comment.
* mallocx.c [ALIGNMENT>GC_DS_TAGS] (GC_realloc): Likewise.
* include/gc/gc.h (GC_all_interior_pointers): Update comment.
* include/private/gc_priv.h [MAX_EXTRA_BYTES==0] (ADD_EXTRA_BYTES):
Define as no-op.
* malloc.c (GC_generic_malloc_inner): Define lb_adjusted local
variable; pass lb_adjusted to GC_alloc_large_and_clear().
* malloc.c [MAX_EXTRA_BYTES>0] (GC_generic_malloc_inner): Set
lb_adjusted to lb if IGNORE_OFF_PAGE flag is set and lb is not less
than HBLKSIZE.
* malloc.c [MAX_EXTRA_BYTES>0] (GC_generic_malloc_aligned): Set
lb_rounded without EXTRA_BYTES added (and compute lg based on
lb_rounded) if IGNORE_OFF_PAGE is set and lb is not less than HBLKSIZE.
* mallocx.c (GC_realloc): Define ok local variable.
* typd_mlc.c (GC_malloc_explicitly_typed_ignore_off_page): Remove
lb_adjusted local variable; call GC_malloc_explicitly_typed() if
lb is smaller than HBLKSIZE-sizeof(word), otherwise pass lb plus
sizeof(word) (instead of lb plus TYPD_EXTRA_BYTES) to
GC_generic_malloc_aligned; add comment.

24 of 24 new or added lines in 4 files covered. (100.0%)

7765 of 10150 relevant lines covered (76.5%)

8458785.63 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

68.48
/finalize.c
1
/*
2
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (c) 2007 Free Software Foundation, Inc.
6
 * Copyright (c) 2008-2022 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose, provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 */
17

18
#include "private/gc_pmark.h"
19

20
#ifndef GC_NO_FINALIZATION
21
# include "gc/javaxfc.h" /* to get GC_finalize_all() as extern "C" */
22

23
/* Type of mark procedure used for marking from finalizable object.     */
24
/* This procedure normally does not mark the object, only its           */
25
/* descendants.                                                         */
26
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
27

28
/* Hash an address into a table of (size) buckets, where size must      */
/* equal (word)1 << log_size.  Shifting by 3 discards low alignment     */
/* bits; xor-ing in higher bits spreads clustered addresses.            */
#define HASH3(addr,size,log_size) \
        ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
         & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
32

33
/* Common prolog shared by the hash-table entry types below.            */
/* hidden_key holds the key encoded with GC_HIDE_POINTER so that the    */
/* collector does not treat it as a reference keeping the key alive.    */
struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};
37

38
/* Entry of a disappearing-link hash table, keyed by the (hidden)       */
/* address of the link field itself.                                    */
struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared.         */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x, y) \
                (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
    word dl_hidden_obj;         /* Pointer to object base       */
};
47

48
/* Entry of the finalizable-objects hash table, keyed by the (hidden)   */
/* object base address.                                                 */
struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
    GC_finalization_proc fo_fn; /* Finalizer.                   */
    ptr_t fo_client_data;       /* Opaque data passed back to fo_fn.    */
    word fo_object_size;        /* In bytes.                    */
    finalization_mark_proc fo_mark_proc;        /* Mark-through procedure */
};
61

62
#ifdef AO_HAVE_store
  /* Update finalize_now atomically as GC_should_invoke_finalizers does */
  /* not acquire the allocation lock.                                   */
# define SET_FINALIZE_NOW(fo) \
            AO_store((volatile AO_t *)&GC_fnlz_roots.finalize_now, (AO_t)(fo))
#else
  /* No atomic store available: fall back to a plain assignment.        */
# define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
#endif /* !AO_HAVE_store */
70

71
/* Push the roots of the finalization data structures (the hash-table   */
/* head pointers) so that the tables and their entries are traced.      */
GC_API void GC_CALL GC_push_finalizer_structures(void)
{
  /* The pushed symbols must be word-aligned.   */
  GC_ASSERT((word)(&GC_dl_hashtbl.head) % sizeof(word) == 0);
  GC_ASSERT((word)(&GC_fnlz_roots) % sizeof(word) == 0);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_ASSERT((word)(&GC_ll_hashtbl.head) % sizeof(word) == 0);
    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
# endif
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
  /* GC_toggleref_arr is pushed specially by GC_mark_togglerefs.        */
}
83

84
/* Threshold of log_size to initiate full collection before growing     */
/* a hash table.  Overridable at build time (guarded by #ifndef).       */
#ifndef GC_ON_GROW_LOG_SIZE_MIN
# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
#endif
89

90
/* Double the size of a hash table. *log_size_ptr is the log of its     */
/* current size.  May be a no-op.  *table is a pointer to an array of   */
/* hash headers.  We update both *table and *log_size_ptr on success.   */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          unsigned *log_size_ptr, const word *entries_ptr)
{
    word i;
    struct hash_chain_entry *p;
    unsigned log_old_size = *log_size_ptr;
    unsigned log_new_size = log_old_size + 1;
    word old_size = *table == NULL ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    /* Avoid growing the table in case of at least 25% of entries can   */
    /* be deleted by enforcing a collection.  Ignored for small tables. */
    /* In incremental mode we skip this optimization, as we want to     */
    /* avoid triggering a full GC whenever possible.                    */
    if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
      IF_CANCEL(int cancel_state;)

      DISABLE_CANCEL(cancel_state);
      GC_gcollect_inner();
      RESTORE_CANCEL(cancel_state);
      /* GC_finalize might decrease entries value.  */
      if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
        return;
    }

    new_table = (struct hash_chain_entry **)
                    GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        (size_t)new_size * sizeof(struct hash_chain_entry *),
                        NORMAL);
    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            /* Growing is optional: keep using the old (smaller) table. */
            return;
        }
    }
    /* Rehash every chained entry into the doubled table.               */
    /* NOTE(review): the code relies on new_table starting out with all */
    /* buckets empty — presumably the internal allocator returns        */
    /* zero-filled memory for NORMAL objects; confirm.                  */
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        GC_dirty(p);
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
    GC_dirty(new_table); /* entire object */
}
149

150
/* Register a disappearing link located inside some heap object: the    */
/* containing object's base address is resolved via GC_base and the     */
/* link is registered against it.                                       */
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
{
    ptr_t base = (ptr_t)GC_base(link);

    if (NULL == base)
        ABORT("Bad arg to GC_register_disappearing_link");
    return GC_general_register_disappearing_link(link, base);
}
159

160
/* Register link against obj in the given table.  Returns GC_SUCCESS,   */
/* GC_DUPLICATE (link already present; its object is updated),          */
/* GC_UNIMPLEMENTED (leak-finding mode) or GC_NO_MEMORY.  Acquires and  */
/* releases the allocation lock internally.                             */
STATIC int GC_register_disappearing_link_inner(
                        struct dl_hashtbl_s *dl_hashtbl, void **link,
                        const void *obj, const char *tbl_log_name)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;

    GC_ASSERT(GC_is_initialized);
    if (EXPECT(GC_find_leak, FALSE)) return GC_UNIMPLEMENTED;
#   ifdef GC_ASSERTIONS
      GC_noop1((word)(*link)); /* check accessibility */
#   endif
    LOCK();
    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    /* Grow the table when it is empty or past its load threshold.      */
    if (EXPECT(NULL == dl_hashtbl -> head, FALSE)
        || EXPECT(dl_hashtbl -> entries
                  > ((word)1 << dl_hashtbl -> log_size), FALSE)) {
        GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                      &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
        GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                           1U << dl_hashtbl -> log_size);
    }
    index = HASH2(link, dl_hashtbl -> log_size);
    /* If the link is already registered, just retarget it.     */
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
            UNLOCK();
            return GC_DUPLICATE;
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
    if (EXPECT(NULL == new_dl, FALSE)) {
      /* Internal allocation failed: retry via the client OOM handler   */
      /* outside the lock, then revalidate state after re-acquiring it. */
      GC_oom_func oom_fn = GC_oom_fn;
      UNLOCK();
      new_dl = (struct disappearing_link *)
                (*oom_fn)(sizeof(struct disappearing_link));
      if (0 == new_dl) {
        return GC_NO_MEMORY;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow.    */
      index = HASH2(link, dl_hashtbl -> log_size);
      /* Check again that our disappearing link not in the table. */
      for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
          curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            /* Free unused new_dl returned by GC_oom_fn() */
            GC_free((void *)new_dl);
#         endif
          return GC_DUPLICATE;
        }
      }
    }
    /* Prepend the new entry to its bucket chain.       */
    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_hashtbl -> head[index]);
    GC_dirty(new_dl);
    dl_hashtbl -> head[index] = new_dl;
    dl_hashtbl -> entries++;
    GC_dirty(dl_hashtbl->head + index);
    UNLOCK();
    return GC_SUCCESS;
}
230

231
/* Public entry point: validate the link (word-aligned, non-NULL) and   */
/* register it in the (short) disappearing-link table.                  */
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
                                                         const void * obj)
{
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_general_register_disappearing_link");
    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,
                                               "dl");
}
239

240
/* Dispose of a disappearing-link entry removed from a table.           */
/* NOTE(review): with debug headers the entry is only unlinked, not     */
/* explicitly freed — presumably explicit deallocation is unsafe in     */
/* that configuration; confirm.                                         */
#ifdef DBG_HDRS_ALL
# define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
#else
# define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
#endif
245

246
/* Unregisters given link and returns the link entry to free.   */
/* Returns NULL if the link is not found.  The caller holds the */
/* allocation lock and disposes of the returned entry (if any)  */
/* via FREE_DL_ENTRY after releasing the lock.                  */
GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl, void **link)
{
    struct disappearing_link *curr_dl;
    struct disappearing_link *prev_dl = NULL;
    size_t index;

    GC_ASSERT(I_HOLD_LOCK());
    if (EXPECT(NULL == dl_hashtbl -> head, FALSE)) return NULL;

    index = HASH2(link, dl_hashtbl -> log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Remove found entry from the table. */
            if (NULL == prev_dl) {
                dl_hashtbl -> head[index] = dl_next(curr_dl);
                GC_dirty(dl_hashtbl->head + index);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
                GC_dirty(prev_dl);
            }
            dl_hashtbl -> entries--;
            break;
        }
        prev_dl = curr_dl;
    }
    /* NULL here means the link was not present.        */
    return curr_dl;
}
276

277
/* Remove link from the (short) disappearing-link table.  Returns 1 if  */
/* an entry was found and removed, 0 otherwise.                         */
GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
{
    struct disappearing_link *entry;
    int removed = 0;

    /* A misaligned link can never have been registered.        */
    if (((word)link & (ALIGNMENT-1)) != 0) return 0; /* Nothing to do. */

    LOCK();
    entry = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
    UNLOCK();
    if (entry != NULL) {
      /* Dispose of the entry outside the lock. */
      FREE_DL_ENTRY(entry);
      removed = 1;
    }
    return removed;
}
290

291
/* Mark from one finalizable object using the specified mark proc.      */
/* May not mark the object pointed to by real_ptr (i.e, it is the job   */
/* of the caller, if appropriate).  Note that this is called with the   */
/* mutator running.  This is safe only if the mutator (client) gets     */
/* the allocation lock to reveal hidden pointers.                       */
GC_INLINE void GC_mark_fo(ptr_t real_ptr, finalization_mark_proc mark_proc)
{
  GC_ASSERT(I_HOLD_LOCK());
  mark_proc(real_ptr);
  /* Process objects pushed by the mark procedure.      */
  while (!GC_mark_stack_empty())
    MARK_FROM_MARK_STACK();
}
304

305
/* Complete a collection in progress, if any.   */
GC_INLINE void GC_complete_ongoing_collection(void) {
  if (EXPECT(GC_collection_in_progress(), FALSE)) {
    /* Drive the marker until GC_mark_some() reports completion.        */
    while (!GC_mark_some(NULL)) { /* empty */ }
  }
}
311

312
/* Toggle-ref support.  */
313
#ifndef GC_TOGGLE_REFS_NOT_NEEDED
314
  /* Local shorthand for the toggle-ref union type.     */
  typedef union toggle_ref_u GCToggleRef;

  /* Client-supplied callback deciding each toggle-ref's fate; 0 (the   */
  /* default) disables recording of toggle-refs (see GC_toggleref_add). */
  STATIC GC_toggleref_func GC_toggleref_callback = 0;
317

318
  /* Invoke the toggle-ref callback on every recorded object and        */
  /* compact GC_toggleref_arr in place according to the returned        */
  /* status (drop / keep strong / keep weak).                           */
  GC_INNER void GC_process_togglerefs(void)
  {
    size_t i;
    size_t new_size = 0;
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      /* A set low bit flags the union as holding a hidden (weak) ref.  */
      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        needs_barrier = TRUE;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }

    if (new_size < GC_toggleref_array_size) {
      /* Zero the tail left over after compaction.      */
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
    if (needs_barrier)
      GC_dirty(GC_toggleref_arr); /* entire object */
  }
358

359
  STATIC void GC_normal_finalize_mark_proc(ptr_t);
360

361
  /* Mark the toggle-ref array itself and every strongly-referenced     */
  /* object recorded in it.                                             */
  STATIC void GC_mark_togglerefs(void)
  {
    size_t i;

    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == GC_toggleref_arr)
      return;

    /* Keep the array object itself alive.      */
    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      /* Weak entries (low bit set) are deliberately skipped here.      */
      if (obj != NULL && ((word)obj & 1) == 0) {
        /* Push and mark the object.    */
        GC_mark_fo((ptr_t)obj, GC_normal_finalize_mark_proc);
        GC_set_mark_bit(obj);
        GC_complete_ongoing_collection();
      }
    }
  }
380

381
  /* Zero out each weak toggle-ref entry whose referent is unmarked     */
  /* (i.e. found to be unreachable).                                    */
  STATIC void GC_clear_togglerefs(void)
  {
    size_t i;
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      /* Low bit set means the entry holds a hidden (weak) pointer.     */
      if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
        if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
          GC_toggleref_arr[i].weak_ref = 0;
        } else {
          /* No need to copy, BDWGC is a non-moving collector.    */
        }
      }
    }
  }
394

395
  /* Install the client toggle-ref callback (under the allocation       */
  /* lock).                                                             */
  GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
  {
    LOCK();
    GC_toggleref_callback = fn;
    UNLOCK();
  }
401

402
  /* Return the currently installed toggle-ref callback (read under     */
  /* the allocation lock).                                              */
  GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
  {
    GC_toggleref_func fn;

    LOCK();
    fn = GC_toggleref_callback;
    UNLOCK();
    return fn;
  }
411

412
  /* Ensure GC_toggleref_arr can accept capacity_inc more entries,      */
  /* allocating or doubling the backing array as needed.  Returns       */
  /* FALSE on allocation failure or capacity overflow.                  */
  static GC_bool ensure_toggleref_capacity(size_t capacity_inc)
  {
    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == GC_toggleref_arr) {
      GC_toggleref_array_capacity = 32; /* initial capacity */
      GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == GC_toggleref_arr)
        return FALSE;
    }
    if (GC_toggleref_array_size + capacity_inc
        >= GC_toggleref_array_capacity) {
      GCToggleRef *new_array;
      /* Double until the requested size fits.  */
      while (GC_toggleref_array_capacity
              < GC_toggleref_array_size + capacity_inc) {
        GC_toggleref_array_capacity *= 2;
        /* Bail out if the doubled capacity wrapped into the top bit.   */
        if ((GC_toggleref_array_capacity
             & ((size_t)1 << (sizeof(size_t) * 8 - 1))) != 0)
          return FALSE; /* overflow */
      }

      new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == new_array)
        return FALSE;
      if (EXPECT(GC_toggleref_array_size > 0, TRUE))
        BCOPY(GC_toggleref_arr, new_array,
              GC_toggleref_array_size * sizeof(GCToggleRef));
      GC_INTERNAL_FREE(GC_toggleref_arr);
      GC_toggleref_arr = new_array;
    }
    return TRUE;
  }
447

448
  /* Record obj as a toggle-ref.  Returns GC_SUCCESS (also when no      */
  /* callback is registered, in which case nothing is recorded) or      */
  /* GC_NO_MEMORY.                                                      */
  GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
  {
    int res = GC_SUCCESS;

    GC_ASSERT(NONNULL_ARG_NOT_NULL(obj));
    LOCK();
    if (GC_toggleref_callback != 0) {
      if (!ensure_toggleref_capacity(1)) {
        res = GC_NO_MEMORY;
      } else {
        /* Weak entries are stored hidden (distinguished elsewhere via  */
        /* the low bit — see GC_process_togglerefs).                    */
        GC_toggleref_arr[GC_toggleref_array_size].strong_ref =
                        is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
        if (is_strong_ref)
          GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
        GC_toggleref_array_size++;
      }
    }
    UNLOCK();
    return res;
  }
468
#endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
469

470
/* Finalizer callback support. */
/* Client-set hook (see GC_set_await_finalize_proc below); 0 means no   */
/* hook is installed.  Invocation site is not in this file chunk.       */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;
472

473
/* Install the await-finalize hook (under the allocation lock).         */
GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
{
  LOCK();
  GC_object_finalized_proc = fn;
  UNLOCK();
}
479

480
/* Return the currently installed await-finalize hook (read under the   */
/* allocation lock).                                                    */
GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
{
  GC_await_finalize_proc fn;

  LOCK();
  fn = GC_object_finalized_proc;
  UNLOCK();
  return fn;
}
489

490
#ifndef GC_LONG_REFS_NOT_NEEDED
491
  /* Register link in the long-link table; the link must be word-       */
  /* aligned and non-NULL.                                              */
  GC_API int GC_CALL GC_register_long_link(void * * link, const void * obj)
  {
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_register_long_link");
    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
                                               "long dl");
  }
498

499
  /* Remove link from the long-link table.  Returns 1 if an entry was   */
  /* found and removed, 0 otherwise.                                    */
  GC_API int GC_CALL GC_unregister_long_link(void * * link)
  {
    struct disappearing_link *entry;
    int removed = 0;

    /* A misaligned link can never have been registered.        */
    if (((word)link & (ALIGNMENT-1)) != 0) return 0; /* Nothing to do. */

    LOCK();
    entry = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
    UNLOCK();
    if (entry != NULL) {
      /* Dispose of the entry outside the lock. */
      FREE_DL_ENTRY(entry);
      removed = 1;
    }
    return removed;
  }
512
#endif /* !GC_LONG_REFS_NOT_NEEDED */
513

514
#ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
515
  /* Move the registration of link to new_link within the given table.  */
  /* Returns GC_NOT_FOUND, GC_DUPLICATE or GC_SUCCESS.  The caller      */
  /* holds the allocation lock.                                         */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
  {
    struct disappearing_link *curr_dl, *new_dl;
    struct disappearing_link *prev_dl = NULL;
    size_t curr_index, new_index;
    word curr_hidden_link, new_hidden_link;

#   ifdef GC_ASSERTIONS
      GC_noop1((word)(*new_link)); /* check accessibility */
#   endif
    GC_ASSERT(I_HOLD_LOCK());
    if (EXPECT(NULL == dl_hashtbl -> head, FALSE)) return GC_NOT_FOUND;

    /* Find current link.       */
    curr_index = HASH2(link, dl_hashtbl -> log_size);
    curr_hidden_link = GC_HIDE_POINTER(link);
    for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == curr_hidden_link)
        break;
      prev_dl = curr_dl;
    }
    if (EXPECT(NULL == curr_dl, FALSE)) {
      return GC_NOT_FOUND;
    } else if (link == new_link) {
      return GC_SUCCESS; /* Nothing to do.      */
    }

    /* link found; now check new_link not present.      */
    new_index = HASH2(new_link, dl_hashtbl -> log_size);
    new_hidden_link = GC_HIDE_POINTER(new_link);
    for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
         new_dl = dl_next(new_dl)) {
      if (new_dl -> dl_hidden_link == new_hidden_link) {
        /* Target already registered; bail.     */
        return GC_DUPLICATE;
      }
    }

    /* Remove from old, add to new, update link.        */
    if (NULL == prev_dl) {
      dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
    } else {
      dl_set_next(prev_dl, dl_next(curr_dl));
      GC_dirty(prev_dl);
    }
    curr_dl -> dl_hidden_link = new_hidden_link;
    dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
    dl_hashtbl -> head[new_index] = curr_dl;
    GC_dirty(curr_dl);
    GC_dirty(dl_hashtbl->head); /* entire object */
    return GC_SUCCESS;
  }
570

571
  /* Public wrapper: validate both links, then move the registration    */
  /* in the (short) disappearing-link table under the lock.             */
  GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
  {
    int result;

    if (((word)new_link & (ALIGNMENT-1)) != 0
        || !NONNULL_ARG_NOT_NULL(new_link))
      ABORT("Bad new_link arg to GC_move_disappearing_link");
    if (((word)link & (ALIGNMENT-1)) != 0)
      return GC_NOT_FOUND; /* Nothing to do. */

    LOCK();
    result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
    UNLOCK();
    return result;
  }
586

587
# ifndef GC_LONG_REFS_NOT_NEEDED
588
    /* Same as GC_move_disappearing_link but operating on the           */
    /* long-link table.                                                 */
    GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
    {
      int result;

      if (((word)new_link & (ALIGNMENT-1)) != 0
          || !NONNULL_ARG_NOT_NULL(new_link))
        ABORT("Bad new_link arg to GC_move_long_link");
      if (((word)link & (ALIGNMENT-1)) != 0)
        return GC_NOT_FOUND; /* Nothing to do. */

      LOCK();
      result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
      UNLOCK();
      return result;
    }
603
# endif /* !GC_LONG_REFS_NOT_NEEDED */
604
#endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
605

606
/* Possible finalization_marker procedures.  Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.            */
#if defined(_MSC_VER) && defined(I386)
  GC_ATTR_NOINLINE
  /* Otherwise some optimizer bug is tickled in VC for x86 (v19, at least). */
#endif
/* Push the object's contents onto the mark stack using its header      */
/* descriptor (the object itself is not marked here; see the comment    */
/* near the finalization_mark_proc typedef).                            */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
{
    GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
                                    GC_mark_stack + GC_mark_stack_size);
}
617

618
/* This only pays very partial attention to the mark descriptor.        */
/* It does the right thing for normal and atomic objects, and treats    */
/* most others as normal.                                               */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t current_p;
    ptr_t scan_limit;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       /* Simple length descriptor: scan only the first descr bytes.    */
       scan_limit = p + descr - sizeof(word);
    } else {
       /* Otherwise treat the whole object as potential pointers.       */
       scan_limit = target_limit + 1 - sizeof(word);
    }
    for (current_p = p; (word)current_p <= (word)scan_limit;
         current_p += ALIGNMENT) {
        word q;

        LOAD_WORD_OR_CONTINUE(q, current_p);
        /* Push only values pointing outside the object itself; self-   */
        /* references are ignored (hence the procedure name).           */
        if (q < (word)p || q > (word)target_limit) {
            GC_PUSH_ONE_HEAP(q, current_p, GC_mark_stack_top);
        }
    }
}
644

645
/* Mark procedure that intentionally marks nothing.     */
STATIC void GC_null_finalize_mark_proc(ptr_t p)
{
    UNUSED_ARG(p);
}
649

650
/* Possible finalization_marker procedures.  Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.            */

/* GC_unreachable_finalize_mark_proc is an alias for normal marking,    */
/* but it is explicitly tested for, and triggers different              */
/* behavior.  Objects registered in this way are not finalized          */
/* if they are reachable by other finalizable objects, even if those    */
/* other objects specify no ordering.                                   */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
{
    /* A dummy comparison to ensure the compiler not to optimize two    */
    /* identical functions into a single one (thus, to ensure a unique  */
    /* address of each).  Alternatively, GC_noop1(p) could be used.     */
    if (EXPECT(NULL == p, FALSE)) return;

    GC_normal_finalize_mark_proc(p);
}
667

668
static GC_bool need_unreachable_finalization = FALSE;
669
        /* Avoid the work if this is not used.  */
670
        /* TODO: turn need_unreachable_finalization into a counter */
671

672
/* Register a finalization function.  See gc.h for details.     */
673
/* The last parameter is a procedure that determines            */
674
/* marking for finalization ordering.  Any objects marked       */
675
/* by that procedure will be guaranteed to not have been        */
676
/* finalized when this finalizer is invoked.                    */
677
/* Register (or update, or remove when fn is 0) the finalizer for obj.  */
/* obj must be the base address of a heap object.  On success the       */
/* previous finalizer/client data are returned through ofn/ocd (if      */
/* non-NULL).  mp selects the finalization ordering policy.  The        */
/* statement ordering below is deliberate: a signal arriving in the     */
/* middle must leave the table in a consistent state.                   */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    struct finalizable_object * curr_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */

    GC_ASSERT(GC_is_initialized);
    if (EXPECT(GC_find_leak, FALSE)) {
      /* No-op.  *ocd and *ofn remain unchanged.    */
      return;
    }
    LOCK();
    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    if (mp == GC_unreachable_finalize_mark_proc)
        need_unreachable_finalization = TRUE;
    /* Grow the hash table if it is absent or over-full (load > 1).     */
    if (EXPECT(NULL == GC_fnlz_roots.fo_head, FALSE)
        || EXPECT(GC_fo_entries > ((word)1 << GC_log_fo_table_size), FALSE)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &GC_log_fo_table_size, &GC_fo_entries);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1U << GC_log_fo_table_size);
    }
    for (;;) {
      struct finalizable_object *prev_fo = NULL;
      GC_oom_func oom_fn;

      index = HASH2(obj, GC_log_fo_table_size);
      curr_fo = GC_fnlz_roots.fo_head[index];
      /* Search the hash chain for an existing entry for obj.   */
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for obj.      */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
            GC_dirty(prev_fo);
          }
          if (fn == 0) {
            /* Removal request: the entry stays deleted.        */
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            /* Update request: overwrite in place.              */
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            GC_dirty(curr_fo);
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
              GC_dirty(prev_fo);
            }
          }
          if (NULL == prev_fo)
            GC_dirty(GC_fnlz_roots.fo_head + index);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      /* No existing entry found.       */
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn().   */
        GC_ASSERT(fn != 0);
#       ifdef LINT2
          if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");
#       endif
        break;
      }
      if (fn == 0) {
        /* Removal of a finalizer that was never registered.    */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(obj, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      /* Allocation failed: retry via the client OOM handler    */
      /* with the lock released.                                */
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory.  *ocd and *ofn remain unchanged.  */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    }
    /* Fill in and link the fresh entry at the head of its chain.       */
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_dirty(new_fo);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    GC_dirty(GC_fnlz_roots.fo_head + index);
    UNLOCK();
}
812

813
/* Public entry: register fn/cd as the finalizer for obj with the       */
/* default (topological) ordering semantics.                            */
GC_API void GC_CALL GC_register_finalizer(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
185,422✔
820

821
/* Public entry: like GC_register_finalizer, but pointers from obj      */
/* back into itself do not delay its finalization.                      */
GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}
447,982✔
828

829
/* Public entry: register a finalizer with no ordering constraints      */
/* (cycles among finalizable objects are all finalized).                */
GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}
4,194,304✔
836

837
/* Public entry: finalize obj only when it becomes truly unreachable,   */
/* even from other finalizable objects.  Requires Java-style            */
/* finalization to be enabled.                                          */
GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
{
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
}
42✔
845

846
#ifndef NO_DEBUGGING
847
  STATIC void GC_dump_finalization_links(
×
848
                                const struct dl_hashtbl_s *dl_hashtbl)
849
  {
850
    size_t dl_size = (size_t)1 << dl_hashtbl -> log_size;
×
851
    size_t i;
852

853
    if (NULL == dl_hashtbl -> head) return; /* empty table  */
×
854

855
    for (i = 0; i < dl_size; i++) {
×
856
      struct disappearing_link *curr_dl;
857

858
      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
×
859
           curr_dl = dl_next(curr_dl)) {
×
860
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj);
×
861
        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link);
×
862

863
        GC_printf("Object: %p, link value: %p, link addr: %p\n",
×
864
                  (void *)real_ptr, *(void **)real_link, (void *)real_link);
865
      }
866
    }
867
  }
868

869
  /* Dump all registered short links, long links (if enabled), and      */
  /* finalizable objects to the GC log.  Debugging aid only.            */
  GC_API void GC_CALL GC_dump_finalization(void)
  {
    struct finalizable_object * curr_fo;
    size_t i;
    /* Table size is zero until the first finalizer is registered.      */
    size_t fo_size = GC_fnlz_roots.fo_head == NULL ? 0 :
                                (size_t)1 << GC_log_fo_table_size;

    GC_printf("\n***Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("\n***Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
#   endif
    GC_printf("\n***Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

        GC_printf("Finalizable object: %p\n", (void *)real_ptr);
      }
    }
  }
×
892
#endif /* !NO_DEBUGGING */
893

894
#ifndef SMALL_CONFIG
895
  STATIC word GC_old_dl_entries = 0; /* for stats printing */
896
# ifndef GC_LONG_REFS_NOT_NEEDED
897
    STATIC word GC_old_ll_entries = 0;
898
# endif
899
#endif /* !SMALL_CONFIG */
900

901
#ifndef THREADS
902
  /* Global variables to minimize the level of recursion when a client  */
903
  /* finalizer allocates memory.                                        */
904
  STATIC int GC_finalizer_nested = 0;
905
                        /* Only the lowest byte is used, the rest is    */
906
                        /* padding for proper global data alignment     */
907
                        /* required for some compilers (like Watcom).   */
908
  STATIC unsigned GC_finalizer_skipped = 0;
909

910
  /* Checks and updates the level of finalizers recursion.              */
911
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
912
  /* collector (to minimize the risk of a deep finalizers recursion),   */
913
  /* otherwise returns a pointer to GC_finalizer_nested.                */
914
  STATIC unsigned char *GC_check_finalizer_nested(void)
  {
    /* Only the lowest byte of GC_finalizer_nested holds the level;     */
    /* it is accessed through a char pointer deliberately.              */
    unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
      /* The deeper the nesting, the more calls are skipped     */
      /* (exponential back-off: 2^level).                       */
      if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
      GC_finalizer_skipped = 0;
    }
    /* Bump the recursion level and hand the caller a pointer so it     */
    /* can reset the level to zero when finalization completes.         */
    *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
    return (unsigned char *)&GC_finalizer_nested;
  }
927
#endif /* !THREADS */
928

929
/* Walk the given disappearing-links table.  When is_remove_dangling is */
/* false: clear (*link = NULL) and delete every entry whose referent    */
/* object is unmarked.  When true: delete entries whose link location   */
/* itself is in an unmarked (about-to-be-reclaimed) object.  Must be    */
/* called with the allocation lock held.                                */
GC_INLINE void GC_make_disappearing_links_disappear(
                                        struct dl_hashtbl_s* dl_hashtbl,
                                        GC_bool is_remove_dangling)
{
  size_t i;
  size_t dl_size = (size_t)1 << dl_hashtbl -> log_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == dl_hashtbl -> head) return; /* empty table  */

  for (i = 0; i < dl_size; i++) {
    struct disappearing_link *curr_dl, *next_dl;
    struct disappearing_link *prev_dl = NULL;

    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
      next_dl = dl_next(curr_dl);
#     if defined(GC_ASSERTIONS) && !defined(THREAD_SANITIZER)
         /* Check accessibility of the location pointed by link. */
        GC_noop1(*(word *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link));
#     endif
      if (is_remove_dangling) {
        ptr_t real_link = (ptr_t)GC_base(GC_REVEAL_POINTER(
                                                curr_dl->dl_hidden_link));

        /* Keep the entry if the link location is static (no base) or   */
        /* lives in a still-marked object.                              */
        if (NULL == real_link || EXPECT(GC_is_marked(real_link), TRUE)) {
          prev_dl = curr_dl;
          continue;
        }
      } else {
        if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER(
                                        curr_dl->dl_hidden_obj)), TRUE)) {
          prev_dl = curr_dl;
          continue;
        }
        /* Referent died: make the link "disappear".    */
        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
      }

      /* Delete curr_dl entry from dl_hashtbl.  */
      if (NULL == prev_dl) {
        dl_hashtbl -> head[i] = next_dl;
        needs_barrier = TRUE;
      } else {
        dl_set_next(prev_dl, next_dl);
        GC_dirty(prev_dl);
      }
      GC_clear_mark_bit(curr_dl);
      dl_hashtbl -> entries--;
    }
  }
  /* One barrier for the whole head array is cheaper than one per slot. */
  if (needs_barrier)
    GC_dirty(dl_hashtbl -> head); /* entire object */
}
982

983
/* Cause disappearing links to disappear and unreachable objects to be  */
984
/* enqueued for finalization.  Called with the world running.           */
985
/* Post-mark finalization phase: clear disappearing links whose         */
/* referents died, then move every unreachable finalizable object onto  */
/* the finalize_now queue (reviving it and everything it references),   */
/* applying Java-style and unreachable-only ordering rules.  Runs with  */
/* the allocation lock held and the world running.                      */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = GC_fnlz_roots.fo_head == NULL ? 0 :
                                (size_t)1 << GC_log_fo_table_size;
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    /* Clear short links to objects that are now unmarked.      */
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(!GC_collection_in_progress());
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_mark_fo(real_ptr, curr_fo -> fo_mark_proc);
            /* If marking from the object reached the object itself,    */
            /* there is a finalization cycle: warn the client.          */
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              /* Revive the object immediately (its finalizer may       */
              /* still access it).                                      */
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
                if (GC_object_finalized_proc) {
                  GC_dirty(GC_fnlz_roots.fo_head + i);
                } else {
                  needs_barrier = TRUE;
                }
              } else {
                fo_set_next(prev_fo, next_fo);
                GC_dirty(prev_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_dirty(curr_fo);
              SET_FINALIZE_NOW(curr_fo);
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        /* Note: fo_hidden_base is already unhidden at this point.      */
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_mark_fo(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        GC_ASSERT(NULL == curr_fo || GC_fnlz_roots.fo_head != NULL);
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              /* Object became reachable again: pull it off the queue   */
              /* and re-register it in the hash table.                  */
              if (NULL == prev_fo) {
                SET_FINALIZE_NOW(next_fo);
              } else {
                fo_set_next(prev_fo, next_fo);
                GC_dirty(prev_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, GC_log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_dirty(curr_fo);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              /* Keep prev_fo correct after the removal.        */
              curr_fo = prev_fo;
              needs_barrier = TRUE;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }
  if (needs_barrier)
    GC_dirty(GC_fnlz_roots.fo_head); /* entire object */

  /* Remove dangling disappearing links. */
  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);

# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
29,460✔
1152

1153
/* Count of finalizers to run, at most, during a single invocation      */
1154
/* of GC_invoke_finalizers(); zero means no limit.  Accessed with the   */
1155
/* allocation lock held.                                                */
1156
STATIC unsigned GC_interrupt_finalizers = 0;
1157

1158
#ifndef JAVA_FINALIZATION_NOT_NEEDED
1159

1160
  /* Enqueue all remaining finalizers to be run.        */
1161
  /* A collection in progress, if any, is completed     */
1162
  /* when the first finalizer is enqueued.              */
1163
  /* Enqueue all remaining finalizers to be run.  Empties the hash      */
  /* table entirely; each object (and everything it references) is      */
  /* marked/revived before being queued.  Caller holds the lock.        */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    size_t i;
    size_t fo_size = GC_fnlz_roots.fo_head == NULL ? 0 :
                                (size_t)1 << GC_log_fo_table_size;

    GC_ASSERT(I_HOLD_LOCK());
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      struct finalizable_object * curr_fo = GC_fnlz_roots.fo_head[i];

      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          struct finalizable_object * next_fo;
          ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

          GC_mark_fo(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);
          /* Any collection in progress is completed when the first     */
          /* finalizer is enqueued.                                     */
          GC_complete_ongoing_collection();
          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_dirty(curr_fo);
          SET_FINALIZE_NOW(curr_fo);

          /* unhide object pointer so any future collections will       */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
      }
    }
    GC_fo_entries = 0;  /* all entries deleted from the hash table */
  }
×
1200

1201
  /* Invoke all remaining finalizers that haven't yet been run.
1202
   * This is needed for strict compliance with the Java standard,
1203
   * which can make the runtime guarantee that all finalizers are run.
1204
   * Unfortunately, the Java standard implies we have to keep running
1205
   * finalizers until there are no more left, a potential infinite loop.
1206
   * YUCK.
1207
   * Note that this is even more dangerous than the usual Java
1208
   * finalizers, in that objects reachable from static variables
1209
   * may have been finalized when these finalizers are run.
1210
   * Finalizers run at this point must be prepared to deal with a
1211
   * mostly broken world.
1212
   */
1213
  /* Run every registered finalizer, repeating until none remain        */
  /* (finalizers may register new finalizers, so this can loop).        */
  GC_API void GC_CALL GC_finalize_all(void)
  {
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      GC_interrupt_finalizers = 0; /* reset */
      /* The lock must be released while the client finalizers run.     */
      UNLOCK();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good   */
      /* idea when we should be notifying another thread to run them.   */
      /* But otherwise we don't have a great way to wait for them to    */
      /* run.                                                           */
      LOCK();
    }
    UNLOCK();
  }
2✔
1229

1230
#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
1231

1232
/* Set the maximum number of finalizers run per GC_invoke_finalizers()  */
/* call (0 means unlimited).  Guarded by the allocation lock.           */
GC_API void GC_CALL GC_set_interrupt_finalizers(unsigned value)
{
  LOCK();
  GC_interrupt_finalizers = value;
  UNLOCK();
}
2✔
1238

1239
/* Return the current limit on finalizers run per invocation (see       */
/* GC_set_interrupt_finalizers); 0 means no limit.                      */
GC_API unsigned GC_CALL GC_get_interrupt_finalizers(void)
{
  unsigned result;

  /* Read the shared setting under the allocation lock. */
  LOCK();
  result = GC_interrupt_finalizers;
  UNLOCK();
  return result;
}
1248

1249
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
1250
/* finalizers can only be called from some kind of "safe state" and     */
1251
/* getting into that safe state is expensive.)                          */
1252
/* Return non-zero if the finalize_now queue is non-empty, i.e. calling */
/* GC_invoke_finalizers() would do useful work.  Safe without the lock: */
/* uses an atomic load when libatomic_ops provides one.                 */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
{
# ifdef AO_HAVE_load
    return AO_load((volatile AO_t *)&GC_fnlz_roots.finalize_now) != 0;
# else
    return GC_fnlz_roots.finalize_now != NULL;
# endif /* !THREADS */
}
1260

1261
/* Invoke finalizers for all objects that are ready to be finalized.    */
1262
/* Dequeue and invoke finalizers from the finalize_now queue until it   */
/* is empty (or the GC_interrupt_finalizers limit is hit).  Returns the */
/* number of finalizers run.  Caller must NOT hold the allocation lock; */
/* each finalizer runs with the lock released.                          */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */

    GC_ASSERT(I_DONT_HOLD_LOCK());
    while (GC_should_invoke_finalizers()) {
        struct finalizable_object * curr_fo;

        LOCK();
        if (count == 0) {
            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        } else if (EXPECT(GC_interrupt_finalizers != 0, FALSE)
                   && (unsigned)count >= GC_interrupt_finalizers) {
            /* Client-requested cap reached: stop early.        */
            UNLOCK();
            break;
        }
        curr_fo = GC_fnlz_roots.finalize_now;
#       ifdef THREADS
            /* Another thread may have emptied the queue since the      */
            /* unlocked check above.                                    */
            if (EXPECT(NULL == curr_fo, FALSE)) {
                UNLOCK();
                break;
            }
#       endif
        SET_FINALIZE_NOW(fo_next(curr_fo));
        UNLOCK();
        fo_set_next(curr_fo, 0);
        /* fo_hidden_base was unhidden when the object was enqueued.    */
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
        /* Explicit freeing of curr_fo is probably a bad idea.  */
        /* It throws off accounting if nearly all objects are   */
        /* finalizable.  Otherwise it should not matter.        */
    }
    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0
#         if defined(THREADS) && !defined(THREAD_SANITIZER)
            /* A quick check whether some memory was freed.     */
            /* The race with GC_free() is safe to be ignored    */
            /* because we only need to know if the current      */
            /* thread has deallocated something.                */
            && bytes_freed_before != GC_bytes_freed
#         endif
       ) {
        LOCK();
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
        UNLOCK();
    }
    return count;
}
1314

1315
static word last_finalizer_notification = 0;
1316

1317
/* Called by the collector after a GC cycle: either invoke the pending  */
/* finalizers directly (default), or, if GC_finalize_on_demand is set,  */
/* call the client notifier once per collection so the client can run   */
/* them itself.  Also a convenient hook for backtrace generation.       */
GC_INNER void GC_notify_or_invoke_finalizers(void)
{
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */
#   endif

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue.  */
      if (!GC_should_invoke_finalizers())
        return;
#   endif
    LOCK();

    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          long i;
          /* Stops when GC_gc_no wraps; that's OK.      */
          last_back_trace_gc_no = GC_WORD_MAX;  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              void *current = GC_generate_random_valid_address();

              UNLOCK();
              GC_printf("\n****Chosen address %p in object\n", current);
              GC_print_backtrace(current);
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            GC_print_back_graph_stats();
          }
#       endif
      }
#   endif
    if (NULL == GC_fnlz_roots.finalize_now) {
      /* Nothing pending (re-checked under the lock).   */
      UNLOCK();
      return;
    }

    if (!GC_finalize_on_demand) {
      unsigned char *pnested;

#     ifdef THREADS
        if (EXPECT(GC_in_thread_creation, FALSE)) {
          UNLOCK();
          return;
        }
#     endif
      pnested = GC_check_finalizer_nested();
      UNLOCK();
      /* Skip GC_invoke_finalizers() if nested. */
      if (pnested != NULL) {
        (void)GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers or interrupted. */
#       ifndef THREADS
          GC_ASSERT(NULL == GC_fnlz_roots.finalize_now
                    || GC_interrupt_finalizers > 0);
#       endif   /* Otherwise GC can run concurrently and add more */
      }
      return;
    }

    /* These variables require synchronization to avoid data races.     */
    /* Notify at most once per collection cycle.                        */
    if (last_finalizer_notification != GC_gc_no) {
        notifier_fn = GC_finalizer_notifier;
        last_finalizer_notification = GC_gc_no;
    }
    UNLOCK();
    if (notifier_fn != 0)
        (*notifier_fn)(); /* Invoke the notifier */
}
1398

1399
#ifndef SMALL_CONFIG
1400
# ifndef GC_LONG_REFS_NOT_NEEDED
1401
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
1402
# else
1403
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)
1404
# endif
1405

1406
  /* Log finalization statistics: live entry/link counts, queued        */
  /* ("ready") objects, and how many links the last cycle cleared       */
  /* (computed against the GC_old_*_entries snapshots).                 */
  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));

    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
      ++ready;
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
  }
1427
#endif /* !SMALL_CONFIG */
1428

1429
#endif /* !GC_NO_FINALIZATION */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc