• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ivmai / bdwgc / 1508

20 May 2023 08:27AM UTC coverage: 72.732% (-0.4%) from 73.093%
1508

push

travis-ci-com

ivmai
Fix alt-stack handling in GC_push_all_stacks if stack grows up
(a cherry-pick of commit 9229da044 from 'master')

* pthread_stop_world.c [STACK_GROWS_UP] (GC_push_all_stacks): Set hi
to p->altstack (instead of p->altstack+p->altstack_size) if
lo is inside altstack; remove relevant FIXME item.

1 of 1 new or added line in 1 file covered. (100.0%)

7215 of 9920 relevant lines covered (72.73%)

11045264.72 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

61.62
/finalize.c
1
/*
2
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5
 * Copyright (C) 2007 Free Software Foundation, Inc
6
 * Copyright (c) 2008-2020 Ivan Maidanski
7
 *
8
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10
 *
11
 * Permission is hereby granted to use or copy this program
12
 * for any purpose,  provided the above notices are retained on all copies.
13
 * Permission to modify the code and to distribute modified code is granted,
14
 * provided the above notices are retained, and a notice that the code was
15
 * modified is included with the above copyright notice.
16
 */
17

18
#include "private/gc_pmark.h"
19

20
#ifndef GC_NO_FINALIZATION
21
# include "javaxfc.h" /* to get GC_finalize_all() as extern "C" */
22

23
/* Type of mark procedure used for marking from finalizable object.     */
24
/* This procedure normally does not mark the object, only its           */
25
/* descendants.                                                         */
26
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
27

28
#define HASH3(addr,size,log_size) \
29
        ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
30
         & ((size) - 1))
31
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
32

33
struct hash_chain_entry {
34
    word hidden_key;
35
    struct hash_chain_entry * next;
36
};
37

38
struct disappearing_link {
39
    struct hash_chain_entry prolog;
40
#   define dl_hidden_link prolog.hidden_key
41
                                /* Field to be cleared.         */
42
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
43
#   define dl_set_next(x, y) \
44
                (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
45
    word dl_hidden_obj;         /* Pointer to object base       */
46
};
47

48
struct finalizable_object {
49
    struct hash_chain_entry prolog;
50
#   define fo_hidden_base prolog.hidden_key
51
                                /* Pointer to object base.      */
52
                                /* No longer hidden once object */
53
                                /* is on finalize_now queue.    */
54
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
55
#   define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
56
    GC_finalization_proc fo_fn; /* Finalizer.                   */
57
    ptr_t fo_client_data;
58
    word fo_object_size;        /* In bytes.                    */
59
    finalization_mark_proc fo_mark_proc;        /* Mark-through procedure */
60
};
61

62
#ifdef AO_HAVE_store
63
  /* Update finalize_now atomically as GC_should_invoke_finalizers does */
64
  /* not acquire the allocation lock.                                   */
65
# define SET_FINALIZE_NOW(fo) \
66
            AO_store((volatile AO_t *)&GC_fnlz_roots.finalize_now, (AO_t)(fo))
67
#else
68
# define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
69
#endif /* !AO_HAVE_store */
70

71
/* Push the roots of the finalization machinery (the disappearing-link */
/* table head(s) and the finalizable-object roots) so that the tables  */
/* themselves are retained by the collector.                           */
GC_API void GC_CALL GC_push_finalizer_structures(void)
{
  /* The pushed symbols are required to be word-aligned.        */
  GC_ASSERT((word)(&GC_dl_hashtbl.head) % sizeof(word) == 0);
  GC_ASSERT((word)(&GC_fnlz_roots) % sizeof(word) == 0);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_ASSERT((word)(&GC_ll_hashtbl.head) % sizeof(word) == 0);
    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
# endif
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
  /* GC_toggleref_arr is pushed specially by GC_mark_togglerefs.        */
}
83

84
/* Threshold of log_size to initiate full collection before growing     */
85
/* a hash table.                                                        */
86
#ifndef GC_ON_GROW_LOG_SIZE_MIN
87
# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
88
#endif
89

90
/* Double the size of a hash table. *log_size_ptr is the log of its     */
91
/* current size.  May be a no-op.                                       */
92
/* *table is a pointer to an array of hash headers.  If we succeed, we  */
93
/* update both *table and *log_size_ptr.  Lock is held.                 */
94
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          unsigned *log_size_ptr, word *entries_ptr)
{
    word i;
    struct hash_chain_entry *p;
    unsigned log_old_size = *log_size_ptr;
    unsigned log_new_size = log_old_size + 1;
    word old_size = *table == NULL ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    /* Avoid growing the table in case of at least 25% of entries can   */
    /* be deleted by enforcing a collection.  Ignored for small tables. */
    /* In incremental mode we skip this optimization, as we want to     */
    /* avoid triggering a full GC whenever possible.                    */
    if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
      IF_CANCEL(int cancel_state;)

      DISABLE_CANCEL(cancel_state);
      (void)GC_try_to_collect_inner(GC_never_stop_func);
      RESTORE_CANCEL(cancel_state);
      /* GC_finalize might decrease entries value.  */
      if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
        return;
    }

    new_table = (struct hash_chain_entry **)
                    GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        (size_t)new_size * sizeof(struct hash_chain_entry *),
                        NORMAL);
    if (new_table == 0) {
        if (*table == 0) {
            /* Cannot even create the initial table - unrecoverable. */
            ABORT("Insufficient space for initial table allocation");
        } else {
            /* Keep using the (full but valid) old table. */
            return;
        }
    }
    /* Rehash every chained entry of the old table into the new one.    */
    /* Keys are stored hidden (see GC_REVEAL_POINTER), so reveal each   */
    /* one before recomputing its bucket for the doubled size.          */
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        GC_dirty(p);
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
    GC_dirty(new_table); /* entire object */
}
150

151
/* Register *link as a disappearing link for the object containing      */
/* link itself; aborts if link does not point into the GC heap.         */
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
{
    ptr_t base = (ptr_t)GC_base(link);

    if (NULL == base)
        ABORT("Bad arg to GC_register_disappearing_link");
    return GC_general_register_disappearing_link(link, base);
}
160

161
/* Common worker for the short and long disappearing-link registration  */
/* entry points.  Inserts link (hidden) into the given hash table,      */
/* growing the table when it is absent or over-full.  Returns           */
/* GC_SUCCESS, GC_DUPLICATE (link already registered - object pointer   */
/* updated in place), GC_NO_MEMORY, or GC_UNIMPLEMENTED (leak-find      */
/* mode).  tbl_log_name is used only for logging.                       */
STATIC int GC_register_disappearing_link_inner(
                        struct dl_hashtbl_s *dl_hashtbl, void **link,
                        const void *obj, const char *tbl_log_name)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if (EXPECT(GC_find_leak, FALSE)) return GC_UNIMPLEMENTED;
    LOCK();
    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    if (EXPECT(NULL == dl_hashtbl -> head, FALSE)
        || EXPECT(dl_hashtbl -> entries
                  > ((word)1 << dl_hashtbl -> log_size), FALSE)) {
        GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                      &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
        GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                           1U << dl_hashtbl -> log_size);
    }
    index = HASH2(link, dl_hashtbl -> log_size);
    /* Scan the bucket chain for an existing registration of link.      */
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
            UNLOCK();
            return GC_DUPLICATE;
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
    if (0 == new_dl) {
      /* Out of memory: retry via the client OOM handler.  The lock     */
      /* must be dropped around the callback, so the table state must   */
      /* be re-examined afterwards.                                     */
      GC_oom_func oom_fn = GC_oom_fn;
      UNLOCK();
      new_dl = (struct disappearing_link *)
                (*oom_fn)(sizeof(struct disappearing_link));
      if (0 == new_dl) {
        return GC_NO_MEMORY;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow.    */
      index = HASH2(link, dl_hashtbl -> log_size);
      /* Check again that our disappearing link not in the table. */
      for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
          curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            /* Free unused new_dl returned by GC_oom_fn() */
            GC_free((void *)new_dl);
#         endif
          return GC_DUPLICATE;
        }
      }
    }
    /* Link and object pointers are stored hidden so the collector      */
    /* itself does not keep them alive through this table.              */
    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_hashtbl -> head[index]);
    GC_dirty(new_dl);
    dl_hashtbl -> head[index] = new_dl;
    dl_hashtbl -> entries++;
    GC_dirty(dl_hashtbl->head + index);
    UNLOCK();
    return GC_SUCCESS;
}
228

229
/* Public entry point: register link (which must be non-null and        */
/* word-aligned) as a short disappearing link for obj.                  */
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
                                                         const void * obj)
{
    if (!NONNULL_ARG_NOT_NULL(link) || ((word)link & (ALIGNMENT-1)) != 0)
        ABORT("Bad arg to GC_general_register_disappearing_link");
    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,
                                               "dl");
}
237

238
#ifdef DBG_HDRS_ALL
239
# define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
240
#else
241
# define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
242
#endif
243

244
/* Unregisters given link and returns the link entry to free.   */
245
/* Unregisters given link and returns the link entry to free.   */
/* Returns NULL if the link was not registered (or the table is */
/* empty).  The caller frees the returned entry outside of the  */
/* lock (see FREE_DL_ENTRY).                                    */
GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl, void **link)
{
    struct disappearing_link *curr_dl;
    struct disappearing_link *prev_dl = NULL;
    size_t index;

    GC_ASSERT(I_HOLD_LOCK());
    if (EXPECT(NULL == dl_hashtbl -> head, FALSE)) return NULL;

    index = HASH2(link, dl_hashtbl -> log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Remove found entry from the table. */
            if (NULL == prev_dl) {
                dl_hashtbl -> head[index] = dl_next(curr_dl);
                GC_dirty(dl_hashtbl->head + index);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
                GC_dirty(prev_dl);
            }
            dl_hashtbl -> entries--;
            break;
        }
        prev_dl = curr_dl;
    }
    /* NULL here means the chain was exhausted without a match. */
    return curr_dl;
}
274

275
/* Remove link from the short disappearing-link table.  Returns 1 if    */
/* the link was found and removed, 0 otherwise.                         */
GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
{
    struct disappearing_link *removed_dl;
    DCL_LOCK_STATE;

    /* A misaligned pointer cannot have been registered. */
    if (((word)link & (ALIGNMENT-1)) != 0)
        return 0;

    LOCK();
    removed_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
    UNLOCK();
    if (removed_dl != NULL) {
        /* The entry is freed outside the lock. */
        FREE_DL_ENTRY(removed_dl);
        return 1;
    }
    return 0;
}
289

290
/* Toggle-ref support.  */
291
#ifndef GC_TOGGLE_REFS_NOT_NEEDED
292
  typedef union toggle_ref_u GCToggleRef;
293

294
  STATIC GC_toggleref_func GC_toggleref_callback = 0;
295

296
  /* Ask the client callback, for every live toggle-ref, whether it     */
  /* should be dropped, kept strong, or kept weak, and compact the      */
  /* array in place accordingly.  A set low bit in the stored value     */
  /* marks a weak (hidden) reference.  Lock is held.                    */
  GC_INNER void GC_process_togglerefs(void)
  {
    size_t i;
    size_t new_size = 0;        /* count of surviving entries */
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        /* Weak entry: the real pointer is stored hidden.       */
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        /* Entry already cleared (referent collected); drop it. */
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        needs_barrier = TRUE;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }

    /* Zero out the tail left over after compaction.    */
    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
    if (needs_barrier)
      GC_dirty(GC_toggleref_arr); /* entire object */
  }
336

337
  STATIC void GC_normal_finalize_mark_proc(ptr_t);
338

339
  /* Mark p's descendants (via the normal finalization mark proc),      */
  /* drain the mark stack, then mark p itself.  If marking is still in  */
  /* progress globally, drive GC_mark_some to completion.               */
  static void push_and_mark_object(void *p)
  {
    GC_normal_finalize_mark_proc((ptr_t)p);
    while (!GC_mark_stack_empty()) {
      MARK_FROM_MARK_STACK();
    }
    GC_set_mark_bit(p);
    if (GC_mark_state != MS_NONE) {
      while (!GC_mark_some(0)) {
        /* Empty. */
      }
    }
  }
352

353
  /* Mark the toggle-ref array itself plus every strong (non-hidden)    */
  /* referent it holds.  Weak entries (low bit set) are left unmarked   */
  /* so they can be cleared by GC_clear_togglerefs.                     */
  STATIC void GC_mark_togglerefs(void)
  {
    size_t i;
    if (NULL == GC_toggleref_arr)
      return;

    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      /* A set low bit means a hidden weak reference - skip it. */
      if (obj != NULL && ((word)obj & 1) == 0) {
        push_and_mark_object(obj);
      }
    }
  }
367

368
  STATIC void GC_clear_togglerefs(void)
23,982✔
369
  {
370
    size_t i;
371
    for (i = 0; i < GC_toggleref_array_size; ++i) {
23,982✔
372
      if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
×
373
        if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
×
374
          GC_toggleref_arr[i].weak_ref = 0;
×
375
        } else {
376
          /* No need to copy, BDWGC is a non-moving collector.    */
377
        }
378
      }
379
    }
380
  }
23,982✔
381

382
  /* Install the client toggle-ref callback.  Stored under the          */
  /* allocation lock; a NULL callback disables GC_toggleref_add.        */
  GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
  {
    DCL_LOCK_STATE;

    LOCK();
    GC_toggleref_callback = fn;
    UNLOCK();
  }
390

391
  /* Return the currently installed toggle-ref callback (or NULL).      */
  GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
  {
    GC_toggleref_func result;
    DCL_LOCK_STATE;

    /* Read under the lock for consistency with GC_set_toggleref_func. */
    LOCK();
    result = GC_toggleref_callback;
    UNLOCK();
    return result;
  }
401

402
  /* Ensure the toggle-ref array can hold capacity_inc more entries,    */
  /* allocating or doubling it as needed.  Returns FALSE on allocation  */
  /* failure or capacity overflow.  Lock is held.                       */
  static GC_bool ensure_toggleref_capacity(size_t capacity_inc)
  {
    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == GC_toggleref_arr) {
      GC_toggleref_array_capacity = 32; /* initial capacity */
      GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == GC_toggleref_arr)
        return FALSE;
    }
    if (GC_toggleref_array_size + capacity_inc
        >= GC_toggleref_array_capacity) {
      GCToggleRef *new_array;
      /* Double the capacity until it suffices, bailing out if the  */
      /* top bit is reached (further doubling would wrap to zero).  */
      while (GC_toggleref_array_capacity
              < GC_toggleref_array_size + capacity_inc) {
        GC_toggleref_array_capacity *= 2;
        if ((GC_toggleref_array_capacity
             & ((size_t)1 << (sizeof(size_t) * 8 - 1))) != 0)
          return FALSE; /* overflow */
      }

      new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == new_array)
        return FALSE;
      if (EXPECT(GC_toggleref_array_size > 0, TRUE))
        BCOPY(GC_toggleref_arr, new_array,
              GC_toggleref_array_size * sizeof(GCToggleRef));
      GC_INTERNAL_FREE(GC_toggleref_arr);
      GC_toggleref_arr = new_array;
    }
    return TRUE;
  }
437

438
  /* Append obj to the toggle-ref array, stored either strong (plain    */
  /* pointer) or weak (hidden pointer, low bit set).  A no-op returning */
  /* GC_SUCCESS when no toggle-ref callback is installed.               */
  GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
  {
    int res = GC_SUCCESS;
    DCL_LOCK_STATE;

    GC_ASSERT(NONNULL_ARG_NOT_NULL(obj));
    LOCK();
    if (GC_toggleref_callback != 0) {
      if (!ensure_toggleref_capacity(1)) {
        res = GC_NO_MEMORY;
      } else {
        GC_toggleref_arr[GC_toggleref_array_size].strong_ref =
                        is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
        /* Only a strong (visible) pointer needs the write barrier. */
        if (is_strong_ref)
          GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
        GC_toggleref_array_size++;
      }
    }
    UNLOCK();
    return res;
  }
459
#endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
460

461
/* Finalizer callback support. */
462
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;
463

464
/* Install the callback invoked before each object is finalized.        */
/* Stored under the allocation lock.                                    */
GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
{
  DCL_LOCK_STATE;

  LOCK();
  GC_object_finalized_proc = fn;
  UNLOCK();
}
472

473
/* Return the currently installed pre-finalization callback (or 0).     */
GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
{
  GC_await_finalize_proc result;
  DCL_LOCK_STATE;

  /* Read under the lock, matching GC_set_await_finalize_proc. */
  LOCK();
  result = GC_object_finalized_proc;
  UNLOCK();
  return result;
}
483

484
#ifndef GC_LONG_REFS_NOT_NEEDED
485
  /* Register link (non-null, word-aligned) as a long disappearing link */
  /* for obj; long links are cleared only when obj becomes truly        */
  /* unreachable (uses the separate GC_ll_hashtbl).                     */
  GC_API int GC_CALL GC_register_long_link(void * * link, const void * obj)
  {
    if (!NONNULL_ARG_NOT_NULL(link) || ((word)link & (ALIGNMENT-1)) != 0)
        ABORT("Bad arg to GC_register_long_link");
    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
                                               "long dl");
  }
492

493
  /* Remove link from the long disappearing-link table.  Returns 1 if   */
  /* the link was found and removed, 0 otherwise.                       */
  GC_API int GC_CALL GC_unregister_long_link(void * * link)
  {
    struct disappearing_link *removed_dl;
    DCL_LOCK_STATE;

    /* A misaligned pointer cannot have been registered. */
    if (((word)link & (ALIGNMENT-1)) != 0)
      return 0;

    LOCK();
    removed_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
    UNLOCK();
    if (removed_dl != NULL) {
      /* The entry is freed outside the lock. */
      FREE_DL_ENTRY(removed_dl);
      return 1;
    }
    return 0;
  }
507
#endif /* !GC_LONG_REFS_NOT_NEEDED */
508

509
#ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
510
  /* Moves a link.  Assume the lock is held.    */
511
  /* Re-key the entry for link to new_link within the given table.      */
  /* Returns GC_SUCCESS, GC_NOT_FOUND (link not registered),            */
  /* or GC_DUPLICATE (new_link already registered).  Lock is held.      */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
  {
    struct disappearing_link *curr_dl, *new_dl;
    struct disappearing_link *prev_dl = NULL;
    size_t curr_index, new_index;
    word curr_hidden_link, new_hidden_link;

    GC_ASSERT(I_HOLD_LOCK());
    if (EXPECT(NULL == dl_hashtbl -> head, FALSE)) return GC_NOT_FOUND;

    /* Find current link.       */
    curr_index = HASH2(link, dl_hashtbl -> log_size);
    curr_hidden_link = GC_HIDE_POINTER(link);
    for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == curr_hidden_link)
        break;
      prev_dl = curr_dl;
    }
    if (EXPECT(NULL == curr_dl, FALSE)) {
      return GC_NOT_FOUND;
    } else if (link == new_link) {
      return GC_SUCCESS; /* Nothing to do.      */
    }

    /* link found; now check new_link not present.      */
    new_index = HASH2(new_link, dl_hashtbl -> log_size);
    new_hidden_link = GC_HIDE_POINTER(new_link);
    for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
         new_dl = dl_next(new_dl)) {
      if (new_dl -> dl_hidden_link == new_hidden_link) {
        /* Target already registered; bail.     */
        return GC_DUPLICATE;
      }
    }

    /* Remove from old, add to new, update link.        */
    if (NULL == prev_dl) {
      dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
    } else {
      dl_set_next(prev_dl, dl_next(curr_dl));
      GC_dirty(prev_dl);
    }
    curr_dl -> dl_hidden_link = new_hidden_link;
    dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
    dl_hashtbl -> head[new_index] = curr_dl;
    GC_dirty(curr_dl);
    /* Both the old and the new bucket head may have changed;   */
    /* dirty the whole head array rather than each slot.        */
    GC_dirty(dl_hashtbl->head); /* entire object */
    return GC_SUCCESS;
  }
563

564
  /* Public entry point: move a short disappearing link registration    */
  /* from link to new_link.                                             */
  GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
  {
    int ret;
    DCL_LOCK_STATE;

    /* The target location must be non-null and word-aligned.   */
    if (!NONNULL_ARG_NOT_NULL(new_link)
        || ((word)new_link & (ALIGNMENT-1)) != 0)
      ABORT("Bad new_link arg to GC_move_disappearing_link");
    /* A misaligned source cannot have been registered.         */
    if (((word)link & (ALIGNMENT-1)) != 0)
      return GC_NOT_FOUND; /* Nothing to do. */

    LOCK();
    ret = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
    UNLOCK();
    return ret;
  }
580

581
# ifndef GC_LONG_REFS_NOT_NEEDED
582
    /* Public entry point: move a long disappearing link registration   */
    /* from link to new_link.                                           */
    GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
    {
      int ret;
      DCL_LOCK_STATE;

      /* The target location must be non-null and word-aligned. */
      if (!NONNULL_ARG_NOT_NULL(new_link)
          || ((word)new_link & (ALIGNMENT-1)) != 0)
        ABORT("Bad new_link arg to GC_move_long_link");
      /* A misaligned source cannot have been registered.       */
      if (((word)link & (ALIGNMENT-1)) != 0)
        return GC_NOT_FOUND; /* Nothing to do. */

      LOCK();
      ret = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
      UNLOCK();
      return ret;
    }
598
# endif /* !GC_LONG_REFS_NOT_NEEDED */
599
#endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
600

601
/* Possible finalization_marker procedures.  Note that mark stack       */
602
/* overflow is handled by the caller, and is not a disaster.            */
603
#if defined(_MSC_VER) && defined(I386)
604
  GC_ATTR_NOINLINE
605
  /* Otherwise some optimizer bug is tickled in VC for x86 (v19, at least). */
606
#endif
607
/* Push all pointers contained in the object p onto the mark stack      */
/* (standard finalization ordering: everything reachable from p stays   */
/* alive; p itself is not marked here).                                 */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
{
    GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
                                    GC_mark_stack + GC_mark_stack_size);
}
612

613
/* This only pays very partial attention to the mark descriptor.        */
614
/* It does the right thing for normal and atomic objects, and treats    */
615
/* most others as normal.                                               */
616
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q;
    ptr_t scan_limit;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    /* For a simple-length descriptor, only the first descr bytes       */
    /* contain pointers; otherwise conservatively scan the whole        */
    /* object.                                                          */
    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       scan_limit = p + descr - sizeof(word);
    } else {
       scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
        word r = *(word *)q;

        /* Push only words that point outside [p, target_limit],        */
        /* i.e. skip the object's references to itself.                 */
        if (r < (word)p || r > (word)target_limit) {
            GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
        }
    }
}
637

638
/* Mark procedure for "no order" finalization: marks nothing, so the    */
/* registered object keeps nothing alive through finalization.          */
STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}
639

640
/* Possible finalization_marker procedures.  Note that mark stack       */
641
/* overflow is handled by the caller, and is not a disaster.            */
642

643
/* GC_unreachable_finalize_mark_proc is an alias for normal marking,    */
644
/* but it is explicitly tested for, and triggers different              */
645
/* behavior.  Objects registered in this way are not finalized          */
646
/* if they are reachable by other finalizable objects, even if those    */
647
/* other objects specify no ordering.                                   */
648
/* Marks like GC_normal_finalize_mark_proc but has a distinct address   */
/* (tested for by GC_register_finalizer_inner to set                    */
/* need_unreachable_finalization).                                      */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
{
    /* A dummy comparison to ensure the compiler not to optimize two    */
    /* identical functions into a single one (thus, to ensure a unique  */
    /* address of each).  Alternatively, GC_noop1(p) could be used.     */
    if (EXPECT(NULL == p, FALSE)) return;

    GC_normal_finalize_mark_proc(p);
}
657

658
static GC_bool need_unreachable_finalization = FALSE;
659
        /* Avoid the work if this is not used.  */
660
        /* TODO: turn need_unreachable_finalization into a counter */
661

662
/* Register a finalization function.  See gc.h for details.     */
663
/* The last parameter is a procedure that determines            */
664
/* marking for finalization ordering.  Any objects marked       */
665
/* by that procedure will be guaranteed to not have been        */
666
/* finalized when this finalizer is invoked.                    */
667
/* Common worker for all GC_register_finalizer* entry points.  With a   */
/* non-null fn, (re)registers fn/cd for obj using mark procedure mp;    */
/* with fn == 0, unregisters obj.  Previous finalizer and client data   */
/* are returned via *ofn/*ocd when those are non-null.  The hash table  */
/* update is ordered so that interruption by a signal leaves the table  */
/* consistent.                                                          */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    struct finalizable_object * curr_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    if (EXPECT(GC_find_leak, FALSE)) {
      /* No-op.  *ocd and *ofn remain unchanged.    */
      return;
    }
    LOCK();
    if (mp == GC_unreachable_finalize_mark_proc)
        need_unreachable_finalization = TRUE;
    if (EXPECT(NULL == GC_fnlz_roots.fo_head, FALSE)
        || EXPECT(GC_fo_entries > ((word)1 << GC_log_fo_table_size), FALSE)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &GC_log_fo_table_size, &GC_fo_entries);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1U << GC_log_fo_table_size);
    }
    /* in the THREADS case we hold allocation lock.             */
    for (;;) {
      struct finalizable_object *prev_fo = NULL;
      GC_oom_func oom_fn;

      index = HASH2(obj, GC_log_fo_table_size);
      curr_fo = GC_fnlz_roots.fo_head[index];
      /* Look for an existing registration of obj in the bucket chain. */
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for obj.      */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
            GC_dirty(prev_fo);
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            GC_dirty(curr_fo);
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
              GC_dirty(prev_fo);
            }
          }
          if (NULL == prev_fo)
            GC_dirty(GC_fnlz_roots.fo_head + index);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn().   */
        GC_ASSERT(fn != 0);
#       ifdef LINT2
          if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");
#       endif
        break;
      }
      if (fn == 0) {
        /* Unregister requested but obj was not registered.     */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(obj, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      /* Out of memory: retry via the client OOM handler with the   */
      /* lock dropped, then re-enter the loop to re-scan the table. */
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* No enough memory.  *ocd and *ofn remain unchanged.   */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_dirty(new_fo);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    GC_dirty(GC_fnlz_roots.fo_head + index);
    UNLOCK();
}
802

803
/* Standard finalization: objects reachable from obj are kept alive     */
/* until obj's finalizer runs (ordered finalization).                   */
GC_API void GC_CALL GC_register_finalizer(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
810

811
/* Like GC_register_finalizer but self-references within obj do not     */
/* delay its finalization (uses GC_ignore_self_finalize_mark_proc).     */
GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}
818

819
/* Unordered finalization: obj's references keep nothing alive for      */
/* finalization purposes (uses GC_null_finalize_mark_proc).             */
GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}
826

827
/* Java-style finalization: obj is finalized only when unreachable even */
/* through other finalizable objects.  Requires GC_java_finalization.   */
GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
{
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
}
835

836
#ifndef NO_DEBUGGING
837
  STATIC void GC_dump_finalization_links(
×
838
                                const struct dl_hashtbl_s *dl_hashtbl)
839
  {
840
    size_t dl_size = (size_t)1 << dl_hashtbl -> log_size;
×
841
    size_t i;
842

843
    if (NULL == dl_hashtbl -> head) return; /* empty table  */
×
844

845
    for (i = 0; i < dl_size; i++) {
×
846
      struct disappearing_link *curr_dl;
847

848
      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
×
849
           curr_dl = dl_next(curr_dl)) {
×
850
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj);
×
851
        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link);
×
852

853
        GC_printf("Object: %p, link: %p\n",
×
854
                  (void *)real_ptr, (void *)real_link);
855
      }
856
    }
857
  }
858

859
  /* Debug helper: dump both disappearing-link tables (short and, when  */
  /* configured, long links) followed by all registered finalizable     */
  /* objects.                                                           */
  GC_API void GC_CALL GC_dump_finalization(void)
  {
    size_t bucket;
    size_t table_size = GC_fnlz_roots.fo_head == NULL ? 0 :
                                (size_t)1 << GC_log_fo_table_size;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
#   endif
    GC_printf("Finalizers:\n");
    for (bucket = 0; bucket < table_size; bucket++) {
      struct finalizable_object *fo;

      for (fo = GC_fnlz_roots.fo_head[bucket]; fo != NULL;
           fo = fo_next(fo)) {
        /* Reveal the disguised base pointer before printing.   */
        GC_printf("Finalizable object: %p\n",
                  (void *)GC_REVEAL_POINTER(fo -> fo_hidden_base));
      }
    }
  }
×
882
#endif /* !NO_DEBUGGING */
883

884
#ifndef SMALL_CONFIG
  /* Snapshot of GC_dl_hashtbl.entries taken at the start of            */
  /* GC_finalize(); GC_print_finalization_stats() subtracts the current */
  /* count from it to report how many short links were cleared.         */
  STATIC word GC_old_dl_entries = 0; /* for stats printing */
# ifndef GC_LONG_REFS_NOT_NEEDED
    /* Same snapshot for the long-link table (GC_ll_hashtbl).   */
    STATIC word GC_old_ll_entries = 0;
# endif
#endif /* !SMALL_CONFIG */
890

891
#ifndef THREADS
  /* Global variables to minimize the level of recursion when a client  */
  /* finalizer allocates memory.                                        */
  STATIC int GC_finalizer_nested = 0;
                        /* Only the lowest byte is used, the rest is    */
                        /* padding for proper global data alignment     */
                        /* required for some compilers (like Watcom).   */
  /* Count of implicitly-triggered GC_invoke_finalizers() calls that    */
  /* have been skipped since the last one that was actually performed.  */
  STATIC unsigned GC_finalizer_skipped = 0;

  /* Checks and updates the level of finalizers recursion.              */
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
  /* collector (to minimize the risk of a deep finalizers recursion),   */
  /* otherwise returns a pointer to GC_finalizer_nested.                */
  STATIC unsigned char *GC_check_finalizer_nested(void)
  {
    /* Read only the low byte (the rest of the int is alignment         */
    /* padding; see the comment at GC_finalizer_nested above).          */
    unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
      /* The number skipped grows exponentially (2^level) with  */
      /* the nesting depth, throttling deep recursion.          */
      if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
      GC_finalizer_skipped = 0;
    }
    *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
    return (unsigned char *)&GC_finalizer_nested;
  }
#endif /* THREADS */
918

919
/* Walk the given disappearing-link hash table (with the allocation     */
/* lock held).  In the normal pass (is_remove_dangling == FALSE),       */
/* clear (*link = NULL) and unregister every link whose target object   */
/* is no longer marked.  In the dangling pass (TRUE), unregister every  */
/* entry whose link location itself is an unmarked heap object (i.e.    */
/* the memory holding the link is about to be reclaimed).               */
GC_INLINE void GC_make_disappearing_links_disappear(
                                        struct dl_hashtbl_s* dl_hashtbl,
                                        GC_bool is_remove_dangling)
{
  size_t i;
  size_t dl_size = (size_t)1 << dl_hashtbl -> log_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == dl_hashtbl -> head) return; /* empty table  */

  for (i = 0; i < dl_size; i++) {
    struct disappearing_link *curr_dl, *next_dl;
    struct disappearing_link *prev_dl = NULL;

    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
      next_dl = dl_next(curr_dl);
      if (is_remove_dangling) {
        /* real_link is the base of the heap object containing the      */
        /* link location (NULL if the location is not in the GC heap).  */
        ptr_t real_link = (ptr_t)GC_base(GC_REVEAL_POINTER(
                                                curr_dl->dl_hidden_link));

        if (NULL == real_link || EXPECT(GC_is_marked(real_link), TRUE)) {
          prev_dl = curr_dl;
          continue;
        }
      } else {
        if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER(
                                        curr_dl->dl_hidden_obj)), TRUE)) {
          prev_dl = curr_dl;
          continue;
        }
        /* The target object died: make the registered link disappear.  */
        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
      }

      /* Delete curr_dl entry from dl_hashtbl.  */
      if (NULL == prev_dl) {
        /* Head of the chain changed; the whole head array is dirtied   */
        /* once at the end instead of per-slot (see needs_barrier).     */
        dl_hashtbl -> head[i] = next_dl;
        needs_barrier = TRUE;
      } else {
        dl_set_next(prev_dl, next_dl);
        GC_dirty(prev_dl);
      }
      GC_clear_mark_bit(curr_dl);
      dl_hashtbl -> entries--;
    }
  }
  if (needs_barrier)
    GC_dirty(dl_hashtbl -> head); /* entire object */
}
968

969
/* Called with held lock (but the world is running).                    */
970
/* Cause disappearing links to disappear and unreachable objects to be  */
971
/* enqueued for finalization.                                           */
972
/* The main finalization pass, run at the end of a collection:          */
/*   1. clear disappearing links whose targets died;                    */
/*   2. mark everything reachable from finalizable objects;             */
/*   3. move still-unreachable finalizable objects from the fo hash     */
/*      table onto the finalize_now queue (unhiding their pointers);    */
/*   4. (Java mode) re-mark from queued objects and revive              */
/*      finalize-when-unreachable objects that became reachable;        */
/*   5. remove dangling link entries and handle long links/togglerefs.  */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = GC_fnlz_roots.fo_head == NULL ? 0 :
                                (size_t)1 << GC_log_fo_table_size;
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            /* The mark proc should not have marked the object itself;  */
            /* if it did, the object is reachable from itself through   */
            /* other finalizable objects - report the cycle.            */
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              /* Keep the object alive until its finalizer has run.     */
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
                if (GC_object_finalized_proc) {
                  GC_dirty(GC_fnlz_roots.fo_head + i);
                } else {
                  /* Defer the write barrier: dirty the whole head      */
                  /* array once at the end (see needs_barrier below).   */
                  needs_barrier = TRUE;
                }
              } else {
                fo_set_next(prev_fo, next_fo);
                GC_dirty(prev_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_dirty(curr_fo);
              SET_FINALIZE_NOW(curr_fo);
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        /* fo_hidden_base was unhidden above, so this is a real ptr.    */
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        GC_ASSERT(NULL == curr_fo || GC_fnlz_roots.fo_head != NULL);
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              /* Object became reachable again: unlink it from the      */
              /* finalize_now queue and re-insert it (re-hidden) into   */
              /* the fo hash table.                                     */
              if (NULL == prev_fo) {
                SET_FINALIZE_NOW(next_fo);
              } else {
                fo_set_next(prev_fo, next_fo);
                GC_dirty(prev_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, GC_log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_dirty(curr_fo);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              /* Keep prev_fo unchanged on the next iteration: the      */
              /* removed entry must not become the new predecessor.     */
              curr_fo = prev_fo;
              needs_barrier = TRUE;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }
  if (needs_barrier)
    GC_dirty(GC_fnlz_roots.fo_head); /* entire object */

  /* Remove dangling disappearing links. */
  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);

# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
23,982✔
1139

1140
#ifndef JAVA_FINALIZATION_NOT_NEEDED
1141

1142
  /* Enqueue all remaining finalizers to be run.        */
1143
  /* Move every registered finalizable object onto the finalize_now     */
  /* queue regardless of reachability (used by GC_finalize_all).  Each  */
  /* object is marked (so it survives), its base pointer is unhidden,   */
  /* and the fo hash table is emptied.  Caller must hold the lock.      */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * next_fo;
    size_t i;
    size_t fo_size = GC_fnlz_roots.fo_head == NULL ? 0 :
                                (size_t)1 << GC_log_fo_table_size;

    GC_ASSERT(I_HOLD_LOCK());
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      struct finalizable_object * curr_fo = GC_fnlz_roots.fo_head[i];

      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

          /* Mark from and then mark the object itself, so it and its   */
          /* referents stay valid while awaiting finalization.          */
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_dirty(curr_fo);
          SET_FINALIZE_NOW(curr_fo);

          /* unhide object pointer so any future collections will       */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
      }
    }
    GC_fo_entries = 0;  /* all entries deleted from the hash table */
  }
×
1180

1181
  /* Invoke all remaining finalizers that haven't yet been run.
1182
   * This is needed for strict compliance with the Java standard,
1183
   * which can make the runtime guarantee that all finalizers are run.
1184
   * Unfortunately, the Java standard implies we have to keep running
1185
   * finalizers until there are no more left, a potential infinite loop.
1186
   * YUCK.
1187
   * Note that this is even more dangerous than the usual Java
1188
   * finalizers, in that objects reachable from static variables
1189
   * may have been finalized when these finalizers are run.
1190
   * Finalizers run at this point must be prepared to deal with a
1191
   * mostly broken world.
1192
   * This routine is externally callable, so is called without
1193
   * the allocation lock.
1194
   */
1195
  GC_API void GC_CALL GC_finalize_all(void)
  {
    DCL_LOCK_STATE;

    LOCK();
    /* Loop because finalizers that run below may register new          */
    /* finalizable objects (see the warning in the comment above).      */
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      /* Drop the lock: finalizers are client code and must not run     */
      /* with the allocation lock held.                                 */
      UNLOCK();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good   */
      /* idea when we should be notifying another thread to run them.   */
      /* But otherwise we don't have a great way to wait for them to    */
      /* run.                                                           */
      LOCK();
    }
    UNLOCK();
  }
×
1212

1213
#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
1214

1215
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
1216
/* finalizers can only be called from some kind of "safe state" and     */
1217
/* getting into that safe state is expensive.)                          */
1218
GC_API int GC_CALL GC_should_invoke_finalizers(void)
{
# ifdef AO_HAVE_load
    /* Atomic load of the queue head: this may be called without the    */
    /* allocation lock while other threads update finalize_now.         */
    return AO_load((volatile AO_t *)&GC_fnlz_roots.finalize_now) != 0;
# else
    /* Plain (non-atomic) read when AO primitives are unavailable.      */
    return GC_fnlz_roots.finalize_now != NULL;
# endif /* !THREADS */
}
1226

1227
/* Invoke finalizers for all objects that are ready to be finalized.    */
1228
/* Should be called without allocation lock.                            */
1229
/* Pop entries off the finalize_now queue one at a time (taking the     */
/* lock only for each pop) and invoke each finalizer without the lock   */
/* held.  Returns the number of finalizers invoked and accounts the     */
/* bytes they freed in GC_finalizer_bytes_freed.                        */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    while (GC_should_invoke_finalizers()) {
        struct finalizable_object * curr_fo;

#       ifdef THREADS
            LOCK();
#       endif
        if (count == 0) {
            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        }
        curr_fo = GC_fnlz_roots.finalize_now;
#       ifdef THREADS
            if (curr_fo != NULL)
                SET_FINALIZE_NOW(fo_next(curr_fo));
            UNLOCK();
            /* Another thread may have emptied the queue between the    */
            /* unlocked check above and taking the lock.                */
            if (curr_fo == 0) break;
#       else
            GC_fnlz_roots.finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        /* fo_hidden_base was unhidden by GC_finalize, so it is the     */
        /* real object pointer here.                                    */
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
        /* Explicit freeing of curr_fo is probably a bad idea.  */
        /* It throws off accounting if nearly all objects are   */
        /* finalizable.  Otherwise it should not matter.        */
    }
    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0
#         if defined(THREADS) && !defined(THREAD_SANITIZER)
            /* A quick check whether some memory was freed.     */
            /* The race with GC_free() is safe to be ignored    */
            /* because we only need to know if the current      */
            /* thread has deallocated something.                */
            && bytes_freed_before != GC_bytes_freed
#         endif
       ) {
        LOCK();
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
        UNLOCK();
    }
    return count;
}
1279

1280
/* GC cycle number (GC_gc_no) at which the finalizer notifier was last  */
/* invoked; read and updated under the allocation lock (see             */
/* GC_notify_or_invoke_finalizers).                                     */
static word last_finalizer_notification = 0;
1281

1282
/* Called by the collector after a cycle: either invoke pending         */
/* finalizers directly (the default), or - if GC_finalize_on_demand is  */
/* set - call the client's notifier at most once per GC cycle so the    */
/* client can run them itself.  Also a hook for backtrace generation    */
/* when KEEP_BACK_PTRS/MAKE_BACK_GRAPH are configured.                  */
GC_INNER void GC_notify_or_invoke_finalizers(void)
{
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */
#   endif
    DCL_LOCK_STATE;

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue.  */
      if (!GC_should_invoke_finalizers())
        return;
#   endif
    LOCK();

    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          long i;
          /* Stops when GC_gc_no wraps; that's OK.      */
          last_back_trace_gc_no = GC_WORD_MAX;  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              void *current = GC_generate_random_valid_address();

              UNLOCK();
              GC_printf("\n****Chosen address %p in object\n", current);
              GC_print_backtrace(current);
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            GC_print_back_graph_stats();
          }
#       endif
      }
#   endif
    if (NULL == GC_fnlz_roots.finalize_now) {
      UNLOCK();
      return;
    }

    if (!GC_finalize_on_demand) {
      unsigned char *pnested = GC_check_finalizer_nested();
      UNLOCK();
      /* Skip GC_invoke_finalizers() if nested */
      if (pnested != NULL) {
        (void) GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers. */
#       ifndef THREADS
          GC_ASSERT(NULL == GC_fnlz_roots.finalize_now);
#       endif   /* Otherwise GC can run concurrently and add more */
      }
      return;
    }

    /* These variables require synchronization to avoid data races.     */
    if (last_finalizer_notification != GC_gc_no) {
        notifier_fn = GC_finalizer_notifier;
        last_finalizer_notification = GC_gc_no;
    }
    UNLOCK();
    /* Invoke the notifier outside the lock (it is client code).        */
    if (notifier_fn != 0)
        (*notifier_fn)(); /* Invoke the notifier */
}
1355

1356
#ifndef SMALL_CONFIG
  /* Select x when long links are compiled in, y otherwise; lets the    */
  /* stats code below reference GC_ll_hashtbl unconditionally.          */
# ifndef GC_LONG_REFS_NOT_NEEDED
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
# else
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)
# endif

  /* Log finalization statistics: registered entries, surviving         */
  /* disappearing links, queued (ready) finalizers, and links cleared   */
  /* since the snapshots taken in GC_finalize (GC_old_[dl/ll]_entries). */
  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));

    /* Count objects currently awaiting finalization.   */
    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
      ++ready;
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
  }
#endif /* !SMALL_CONFIG */
1385

1386
#endif /* !GC_NO_FINALIZATION */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc