ivmai / bdwgc / 1490

28 Apr 2023 10:30AM UTC coverage: 72.923% (+0.001%) from 72.922%

Build 1490 (push, travis-ci-com), committed by ivmai:

Fix missing extern C for __asan_default_options
(fix of commit 84b695d01)

Issue #206 (bdwgc).

* os_dep.c [ADDRESS_SANITIZER && (UNIX_LIKE || NEED_FIND_LIMIT
|| MPROTECT_VDB) && !CUSTOM_ASAN_DEF_OPTIONS] (__asan_default_options):
Add the declaration wrapped into EXTERN_C_BEGIN.

7231 of 9916 relevant lines covered (72.92%)

11031094.35 hits per line

Source File: /pthread_support.c (76.15% of lines covered)
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2008-2021 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/pthread_support.h"

/*
 * Support code originally for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code no doubt makes some assumptions beyond what is
 * guaranteed by the pthread standard, though it now does
 * very little of that.  It now also supports NPTL, and many
 * other POSIX thread implementations.  We are trying to merge
 * all flavors of pthread support code into this file.
 */

#if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)

# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
#   if !defined(GC_RTEMS_PTHREADS)
#     include <sys/mman.h>
#   endif
#   include <sys/time.h>
#   include <sys/types.h>
#   include <sys/stat.h>
#   include <fcntl.h>
# endif
# include <signal.h>

# include "gc_inline.h"

#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS || GC_FREEBSD_THREADS */

#if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
# include <sys/param.h>
# include <sys/sysctl.h>
#endif /* GC_NETBSD_THREADS || GC_OPENBSD_THREADS */

/* Allocator lock definitions.          */
#if !defined(USE_SPIN_LOCK)
  GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef GC_ASSERTIONS
  GC_INNER unsigned long GC_lock_holder = NO_THREAD;
                /* Used only for assertions.    */
#endif

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an uint in DG/UX */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */

/* Undefine macros used to redirect pthread primitives. */
# undef pthread_create
# ifndef GC_NO_PTHREAD_SIGMASK
#   undef pthread_sigmask
# endif
# ifndef GC_NO_PTHREAD_CANCEL
#   undef pthread_cancel
# endif
# ifdef GC_HAVE_PTHREAD_EXIT
#   undef pthread_exit
# endif
# undef pthread_join
# undef pthread_detach
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
     && !defined(_PTHREAD_USE_PTDNAM_)
  /* Restore the original mangled names on Tru64 UNIX.  */
#   define pthread_create __pthread_create
#   define pthread_join __pthread_join
#   define pthread_detach __pthread_detach
#   ifndef GC_NO_PTHREAD_CANCEL
#     define pthread_cancel __pthread_cancel
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
#     define pthread_exit __pthread_exit
#   endif
# endif

#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
    int REAL_FUNC(pthread_create)(pthread_t *,
                                  GC_PTHREAD_CREATE_CONST pthread_attr_t *,
                                  void *(*start_routine)(void *), void *);
    int REAL_FUNC(pthread_join)(pthread_t, void **);
    int REAL_FUNC(pthread_detach)(pthread_t);
#   ifndef GC_NO_PTHREAD_SIGMASK
      int REAL_FUNC(pthread_sigmask)(int, const sigset_t *, sigset_t *);
#   endif
#   ifndef GC_NO_PTHREAD_CANCEL
      int REAL_FUNC(pthread_cancel)(pthread_t);
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
      void REAL_FUNC(pthread_exit)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
#   endif
#else
#   ifdef GC_USE_DLOPEN_WRAP
#     include <dlfcn.h>
#     define WRAP_FUNC(f) f
#     define REAL_FUNC(f) GC_real_##f
      /* We define both GC_f and plain f to be the wrapped function.    */
      /* In that way plain calls work, as do calls from files that      */
      /* included gc.h, which redefined f to GC_f.                      */
      /* FIXME: Needs work for Darwin and Tru64 (OSF1). */
      typedef int (* GC_pthread_create_t)(pthread_t *,
                                    GC_PTHREAD_CREATE_CONST pthread_attr_t *,
                                    void * (*)(void *), void *);
      static GC_pthread_create_t REAL_FUNC(pthread_create);
#     ifndef GC_NO_PTHREAD_SIGMASK
        typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *,
                                             sigset_t *);
        static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
#     endif
      typedef int (* GC_pthread_join_t)(pthread_t, void **);
      static GC_pthread_join_t REAL_FUNC(pthread_join);
      typedef int (* GC_pthread_detach_t)(pthread_t);
      static GC_pthread_detach_t REAL_FUNC(pthread_detach);
#     ifndef GC_NO_PTHREAD_CANCEL
        typedef int (* GC_pthread_cancel_t)(pthread_t);
        static GC_pthread_cancel_t REAL_FUNC(pthread_cancel);
#     endif
#     ifdef GC_HAVE_PTHREAD_EXIT
        typedef void (* GC_pthread_exit_t)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
        static GC_pthread_exit_t REAL_FUNC(pthread_exit);
#     endif
#   else
#     define WRAP_FUNC(f) GC_##f
#     if !defined(GC_DGUX386_THREADS)
#       define REAL_FUNC(f) f
#     else /* GC_DGUX386_THREADS */
#       define REAL_FUNC(f) __d10_##f
#     endif /* GC_DGUX386_THREADS */
#   endif
#endif
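
/* Note on the three interception strategies above: GC_USE_LD_WRAP      */
/* relies on the GNU linker's --wrap option, so a client would link     */
/* with something like "-Wl,--wrap=pthread_create" (the flag shown is   */
/* illustrative); GC_USE_DLOPEN_WRAP resolves the real functions        */
/* lazily via dlsym() below; otherwise gc.h simply redefines            */
/* pthread_create and friends to the GC_-prefixed wrappers at compile   */
/* time.                                                                */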

#if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
  /* Define GC_ functions as aliases for the plain ones, which will     */
  /* be intercepted.  This allows files which include gc.h, and hence   */
  /* generate references to the GC_ symbols, to see the right symbols.  */
  GC_API int GC_pthread_create(pthread_t * t,
                               GC_PTHREAD_CREATE_CONST pthread_attr_t *a,
                               void * (* fn)(void *), void * arg)
  {
    return pthread_create(t, a, fn, arg);
  }

# ifndef GC_NO_PTHREAD_SIGMASK
    GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
                                  sigset_t *old)
    {
      return pthread_sigmask(how, mask, old);
    }
# endif /* !GC_NO_PTHREAD_SIGMASK */

  GC_API int GC_pthread_join(pthread_t t, void **res)
  {
    return pthread_join(t, res);
  }

  GC_API int GC_pthread_detach(pthread_t t)
  {
    return pthread_detach(t);
  }

# ifndef GC_NO_PTHREAD_CANCEL
    GC_API int GC_pthread_cancel(pthread_t t)
    {
      return pthread_cancel(t);
    }
# endif /* !GC_NO_PTHREAD_CANCEL */

# ifdef GC_HAVE_PTHREAD_EXIT
    GC_API GC_PTHREAD_EXIT_ATTRIBUTE void GC_pthread_exit(void *retval)
    {
      pthread_exit(retval);
    }
# endif
#endif /* Linker-based interception. */

#ifdef GC_USE_DLOPEN_WRAP
  STATIC GC_bool GC_syms_initialized = FALSE;

  STATIC void GC_init_real_syms(void)
  {
    void *dl_handle;

    if (GC_syms_initialized) return;
#   ifdef RTLD_NEXT
      dl_handle = RTLD_NEXT;
#   else
      dl_handle = dlopen("libpthread.so.0", RTLD_LAZY);
      if (NULL == dl_handle) {
        dl_handle = dlopen("libpthread.so", RTLD_LAZY); /* without ".0" */
      }
      if (NULL == dl_handle) ABORT("Couldn't open libpthread");
#   endif
    REAL_FUNC(pthread_create) = (GC_pthread_create_t)(word)
                                dlsym(dl_handle, "pthread_create");
#   ifdef RTLD_NEXT
      if (REAL_FUNC(pthread_create) == 0)
        ABORT("pthread_create not found"
              " (probably -lgc is specified after -lpthread)");
#   endif
#   ifndef GC_NO_PTHREAD_SIGMASK
      REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)(word)
                                dlsym(dl_handle, "pthread_sigmask");
#   endif
    REAL_FUNC(pthread_join) = (GC_pthread_join_t)(word)
                                dlsym(dl_handle, "pthread_join");
    REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)(word)
                                  dlsym(dl_handle, "pthread_detach");
#   ifndef GC_NO_PTHREAD_CANCEL
      REAL_FUNC(pthread_cancel) = (GC_pthread_cancel_t)(word)
                                    dlsym(dl_handle, "pthread_cancel");
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
      REAL_FUNC(pthread_exit) = (GC_pthread_exit_t)(word)
                                  dlsym(dl_handle, "pthread_exit");
#   endif
    GC_syms_initialized = TRUE;
  }

# define INIT_REAL_SYMS() if (EXPECT(GC_syms_initialized, TRUE)) {} \
                            else GC_init_real_syms()
#else
# define INIT_REAL_SYMS() (void)0
#endif
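
/* A caution about the dlsym(RTLD_NEXT) path above: the lookup only     */
/* searches the objects loaded after the collector library itself, so   */
/* the collector must precede libpthread on the link line (e.g.         */
/* "-lgc -lpthread", not the reverse; the sample link order is          */
/* illustrative).  Otherwise pthread_create is not found and            */
/* GC_init_real_syms() aborts with the message above.                   */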

static GC_bool parallel_initialized = FALSE;

#ifndef GC_ALWAYS_MULTITHREADED
  GC_INNER GC_bool GC_need_to_lock = FALSE;
#endif

STATIC int GC_nprocs = 1;
                        /* Number of processors.  We may not have       */
                        /* access to all of them, but this is as good   */
                        /* a guess as any ...                           */

#ifdef THREAD_LOCAL_ALLOC
  /* We must explicitly mark ptrfree and gcj free lists, since the free */
  /* list links wouldn't otherwise be found.  We also set them in the   */
  /* normal free lists, since that involves touching less memory than   */
  /* if we scanned them normally.                                       */
  GC_INNER void GC_mark_thread_local_free_lists(void)
  {
    int i;
    GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        if (!(p -> flags & FINISHED))
          GC_mark_thread_local_fls_for(&(p->tlfs));
      }
    }
  }

# if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists are completely marked.    */
    /* Also check that thread-specific-data structures are marked.      */
    void GC_check_tls(void)
    {
        int i;
        GC_thread p;

        for (i = 0; i < THREAD_TABLE_SZ; ++i) {
          for (p = GC_threads[i]; 0 != p; p = p -> next) {
            if (!(p -> flags & FINISHED))
              GC_check_tls_for(&(p->tlfs));
          }
        }
#       if defined(USE_CUSTOM_SPECIFIC)
          if (GC_thread_key != 0)
            GC_check_tsd_marks(GC_thread_key);
#       endif
    }
# endif /* GC_ASSERTIONS */

#endif /* THREAD_LOCAL_ALLOC */

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

#ifdef PARALLEL_MARK

static ptr_t marker_sp[MAX_MARKERS - 1] = {0};
#ifdef IA64
  static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
#endif

#if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
  static mach_port_t marker_mach_threads[MAX_MARKERS - 1] = {0};

  /* Used only by GC_suspend_thread_list().     */
  GC_INNER GC_bool GC_is_mach_marker(thread_act_t thread)
  {
    int i;
    for (i = 0; i < GC_markers_m1; i++) {
      if (marker_mach_threads[i] == thread)
        return TRUE;
    }
    return FALSE;
  }
#endif /* GC_DARWIN_THREADS */

#ifdef HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG /* NetBSD */
  static void set_marker_thread_name(unsigned id)
  {
    int err = pthread_setname_np(pthread_self(), "GC-marker-%zu",
                                 (void*)(size_t)id);
    if (err != 0)
      WARN("pthread_setname_np failed, errno= %" WARN_PRIdPTR "\n",
           (signed_word)err);
  }
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) \
      || defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  static void set_marker_thread_name(unsigned id)
  {
    char name_buf[16];  /* pthread_setname_np may fail for longer names */
    int len = sizeof("GC-marker-") - 1;

    /* Compose the name manually, as snprintf may be unavailable or a   */
    /* "%u directive output may be truncated" warning may occur.        */
    BCOPY("GC-marker-", name_buf, len);
    if (id >= 10)
      name_buf[len++] = (char)('0' + (id / 10) % 10);
    name_buf[len] = (char)('0' + id % 10);
    name_buf[len + 1] = '\0';

#   ifdef HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID /* iOS, OS X */
      (void)pthread_setname_np(name_buf);
#   else /* Linux, Solaris, etc. */
      if (pthread_setname_np(pthread_self(), name_buf) != 0)
        WARN("pthread_setname_np failed\n", 0);
#   endif
  }
#else
# define set_marker_thread_name(id) (void)(id)
#endif
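
/* With the variants above, marker threads show up in debuggers and     */
/* monitoring tools under names like "GC-marker-0" or "GC-marker-12"    */
/* (at most two decimal digits are composed, which suffices since       */
/* MAX_MARKERS is 16 by default).                                       */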

STATIC void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;
  IF_CANCEL(int cancel_state;)

  if ((word)id == GC_WORD_MAX) return 0; /* to prevent a compiler warning */
  DISABLE_CANCEL(cancel_state);
                         /* Mark threads are not cancellable; they      */
                         /* should be invisible to client.              */
  set_marker_thread_name((unsigned)(word)id);
  marker_sp[(word)id] = GC_approx_sp();
# ifdef IA64
    marker_bsp[(word)id] = GC_save_regs_in_stack();
# endif
# if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
    marker_mach_threads[(word)id] = mach_thread_self();
# endif

  /* Inform GC_start_mark_threads about completion of marker data init. */
  GC_acquire_mark_lock();
  if (0 == --GC_fl_builder_count) /* count may have a negative value */
    GC_notify_all_builder();

  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This would be important if it were called from the    */
    /* signal handler or from the GC lock acquisition code.  Under      */
    /* Linux, it's not safe to call it from a signal handler, since it  */
    /* uses mutexes and condition variables.  Since it is called only   */
    /* here, the argument is unnecessary.                               */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* Resynchronize if we get far off, e.g. because GC_mark_no     */
        /* wrapped.                                                     */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_log_printf("Starting mark helper for mark number %lu\n",
                    (unsigned long)my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}

STATIC pthread_t GC_mark_threads[MAX_MARKERS];

#ifdef GLIBC_2_1_MUTEX_HACK
  /* Ugly workaround for a Linux threads bug in the final versions      */
  /* of glibc 2.1: pthread_mutex_trylock sets the mutex owner           */
  /* field even when it fails to acquire the mutex.  This causes        */
  /* pthread_cond_wait to die.  Remove for glibc 2.2.                   */
  /* According to the man page, we should use                           */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually   */
  /* defined.                                                           */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static int available_markers_m1 = 0;

#ifdef CAN_HANDLE_FORK
  static pthread_cond_t mark_cv;
                        /* initialized by GC_start_mark_threads_inner   */
#else
  static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
#endif

STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all);

GC_INNER void GC_start_mark_threads_inner(void)
{
    int i;
    pthread_attr_t attr;
#   ifndef NO_MARKER_SPECIAL_SIGMASK
      sigset_t set, oldset;
#   endif

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (available_markers_m1 <= 0 || GC_parallel) return;
                /* Skip if parallel markers disabled or already started. */
    GC_wait_for_gc_completion(TRUE);

#   ifdef CAN_HANDLE_FORK
      /* Initialize mark_cv (for the first time), or clean up its value */
      /* after forking in the child process.  All the marker threads in */
      /* the parent process were blocked on this variable at fork, so   */
      /* pthread_cond_wait() malfunction (hang) is possible in the      */
      /* child process without such a cleanup.                          */
      /* TODO: This is not portable, it is better to shortly unblock    */
      /* all marker threads in the parent process at fork.              */
      {
        pthread_cond_t mark_cv_local = PTHREAD_COND_INITIALIZER;
        BCOPY(&mark_cv_local, &mark_cv, sizeof(mark_cv));
      }
#   endif

    GC_ASSERT(GC_fl_builder_count == 0);
    INIT_REAL_SYMS(); /* for pthread_create */
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   ifdef DEFAULT_STACK_MAYBE_SMALL
      /* Default stack size is usually too small: increase it.  */
      /* Otherwise marker threads or GC may run out of space.   */
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed");
        if (old_size < MIN_STACK_SIZE
            && old_size != 0 /* stack size is known */) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed");
        }
      }
#   endif /* DEFAULT_STACK_MAYBE_SMALL */

#   ifndef NO_MARKER_SPECIAL_SIGMASK
      /* Apply special signal mask to GC marker threads, and don't drop */
      /* user-defined signals by GC marker threads.                     */
      if (sigfillset(&set) != 0)
        ABORT("sigfillset failed");

#     if !defined(GC_DARWIN_THREADS) && !defined(GC_OPENBSD_UTHREADS) \
         && !defined(NACL)
        /* These are used by GC to stop and restart the world.  */
        if (sigdelset(&set, GC_get_suspend_signal()) != 0
            || sigdelset(&set, GC_get_thr_restart_signal()) != 0)
          ABORT("sigdelset failed");
#     endif

      if (REAL_FUNC(pthread_sigmask)(SIG_BLOCK, &set, &oldset) < 0) {
        WARN("pthread_sigmask set failed, no markers started\n", 0);
        GC_markers_m1 = 0;
        (void)pthread_attr_destroy(&attr);
        return;
      }
#   endif /* !NO_MARKER_SPECIAL_SIGMASK */

    /* To have proper GC_parallel value in GC_help_marker.      */
    GC_markers_m1 = available_markers_m1;

    for (i = 0; i < available_markers_m1; ++i) {
      if (0 != REAL_FUNC(pthread_create)(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed\n", 0);
        /* Don't try to create other marker threads.    */
        GC_markers_m1 = i;
        break;
      }
    }

#   ifndef NO_MARKER_SPECIAL_SIGMASK
      /* Restore previous signal mask.  */
      if (REAL_FUNC(pthread_sigmask)(SIG_SETMASK, &oldset, NULL) < 0) {
        WARN("pthread_sigmask restore failed\n", 0);
      }
#   endif

    (void)pthread_attr_destroy(&attr);
    GC_wait_for_markers_init();
    GC_COND_LOG_PRINTF("Started %d mark helper threads\n", GC_markers_m1);
}
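
/* Note: a thread created by pthread_create() inherits the signal mask  */
/* of its creator, which is why the mask is switched around the         */
/* creation loop above: the markers start with nearly all signals       */
/* blocked, except the ones used to suspend and restart the world.      */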

#endif /* PARALLEL_MARK */

GC_INNER GC_bool GC_thr_initialized = FALSE;

GC_INNER volatile GC_thread GC_threads[THREAD_TABLE_SZ] = {0};

/* It may not be safe to allocate when we register the first thread.    */
/* As "next" and "status" fields are unused, no need to push this       */
/* (but "backing_store_end" field should be pushed on E2K).             */
static struct GC_Thread_Rep first_thread;

void GC_push_thread_structures(void)
{
    GC_ASSERT(I_HOLD_LOCK());
    GC_PUSH_ALL_SYM(GC_threads);
#   ifdef E2K
      GC_PUSH_ALL_SYM(first_thread.backing_store_end);
#   endif
#   if defined(THREAD_LOCAL_ALLOC) && defined(USE_CUSTOM_SPECIFIC)
      GC_PUSH_ALL_SYM(GC_thread_key);
#   endif
}

#ifdef DEBUG_THREADS
  STATIC int GC_count_threads(void)
  {
    int i;
    int count = 0;
    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
        GC_thread th = GC_threads[i];
        while (th) {
            if (!(th->flags & FINISHED))
                ++count;
            th = th->next;
        }
    }
    return count;
  }
#endif /* DEBUG_THREADS */

/* Add a thread to GC_threads.  We assume it wasn't already there.      */
/* Caller holds allocation lock.                                        */
STATIC GC_thread GC_new_thread(pthread_t id)
{
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

#   ifdef DEBUG_THREADS
        GC_log_printf("Creating thread %p\n", (void *)id);
        for (result = GC_threads[hv]; result != NULL; result = result->next)
          if (!THREAD_EQUAL(result->id, id)) {
            GC_log_printf("Hash collision at GC_threads[%d]\n", hv);
            break;
          }
#   endif
    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(first_thread_used, TRUE)) {
        result = &first_thread;
        first_thread_used = TRUE;
        GC_ASSERT(NULL == GC_threads[hv]);
#       if defined(THREAD_SANITIZER) && defined(CPPCHECK)
          GC_noop1((unsigned char)result->dummy[0]);
#       endif
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
        if (result == 0) return(0);
    }
    result -> id = id;
#   ifdef USE_TKILL_ON_ANDROID
      result -> kernel_id = gettid();
#   endif
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
#   ifdef NACL
      GC_nacl_gc_thread_self = result;
      GC_nacl_initialize_gc_thread();
#   endif
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    if (EXPECT(result != &first_thread, TRUE))
      GC_dirty(result);
    return(result);
}
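
/* GC_threads is a fixed-size hash table of singly-linked chains:       */
/* THREAD_TABLE_INDEX maps a pthread id to one of THREAD_TABLE_SZ       */
/* buckets, and GC_new_thread pushes entries at the chain head, so a    */
/* lookup finds the most recently registered entry for a given id       */
/* first.                                                               */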

/* Delete a thread from GC_threads.  We assume it is there.     */
/* (The code intentionally traps if it wasn't.)                 */
/* It is safe to delete the main thread.                        */
STATIC void GC_delete_thread(pthread_t id)
{
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread p = GC_threads[hv];
    GC_thread prev = NULL;

#   ifdef DEBUG_THREADS
      GC_log_printf("Deleting thread %p, n_threads= %d\n",
                    (void *)id, GC_count_threads());
#   endif
    GC_ASSERT(I_HOLD_LOCK());
    while (!THREAD_EQUAL(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        GC_ASSERT(prev != &first_thread);
        prev -> next = p -> next;
        GC_dirty(prev);
    }
    if (p != &first_thread) {
#     ifdef GC_DARWIN_THREADS
        mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#     endif
      GC_INTERNAL_FREE(p);
    }
}

/* If a thread has been joined, but we have not yet             */
/* been notified, then there may be more than one thread        */
/* in the table with the same pthread id.                       */
/* This is OK, but we need a way to delete a specific one.      */
STATIC void GC_delete_gc_thread(GC_thread t)
{
    pthread_t id = t -> id;
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread p = GC_threads[hv];
    GC_thread prev = NULL;

    GC_ASSERT(I_HOLD_LOCK());
    while (p != t) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        GC_ASSERT(prev != &first_thread);
        prev -> next = p -> next;
        GC_dirty(prev);
    }
#   ifdef GC_DARWIN_THREADS
        mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#   endif
    GC_INTERNAL_FREE(p);

#   ifdef DEBUG_THREADS
      GC_log_printf("Deleted thread %p, n_threads= %d\n",
                    (void *)id, GC_count_threads());
#   endif
}

/* Return a GC_thread corresponding to a given pthread_t.       */
/* Returns 0 if it's not there.                                 */
/* Caller holds allocation lock or otherwise inhibits           */
/* updates.                                                     */
/* If there is more than one thread with the given id we        */
/* return the most recent one.                                  */
GC_INNER GC_thread GC_lookup_thread(pthread_t id)
{
    GC_thread p = GC_threads[THREAD_TABLE_INDEX(id)];

    while (p != 0 && !THREAD_EQUAL(p -> id, id)) p = p -> next;
    return(p);
}

#ifndef GC_NO_FINALIZATION
  /* Called by GC_finalize() (in case of an allocation failure observed). */
  GC_INNER void GC_reset_finalizer_nested(void)
  {
    GC_thread me = GC_lookup_thread(pthread_self());

    me->finalizer_nested = 0;
  }

  /* Checks and updates the thread-local level of finalizers recursion. */
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
  /* collector (to minimize the risk of a deep finalizers recursion),   */
  /* otherwise returns a pointer to the thread-local finalizer_nested.  */
  /* Called by GC_notify_or_invoke_finalizers() only (the GC lock is    */
  /* held).                                                             */
  GC_INNER unsigned char *GC_check_finalizer_nested(void)
  {
    GC_thread me = GC_lookup_thread(pthread_self());
    unsigned nesting_level = me->finalizer_nested;

    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
      if (++me->finalizer_skipped < (1U << nesting_level)) return NULL;
      me->finalizer_skipped = 0;
    }
    me->finalizer_nested = (unsigned char)(nesting_level + 1);
    return &me->finalizer_nested;
  }
#endif /* !GC_NO_FINALIZATION */
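
/* The skipping above is an exponential backoff: at nesting level n,    */
/* only every (1 << n)-th implicitly-triggered GC_invoke_finalizers()   */
/* proceeds (every 2nd at level 1, every 4th at level 2, and so on),    */
/* which bounds the finalizer recursion depth without forbidding        */
/* nested invocation outright.                                          */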

#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  /* This is called from thread-local GC_malloc(). */
  GC_bool GC_is_thread_tsd_valid(void *tsd)
  {
    GC_thread me;
    DCL_LOCK_STATE;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    UNLOCK();
    return (word)tsd >= (word)(&me->tlfs)
            && (word)tsd < (word)(&me->tlfs) + sizeof(me->tlfs);
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */

GC_API int GC_CALL GC_thread_is_registered(void)
{
    pthread_t self = pthread_self();
    GC_thread me;
    DCL_LOCK_STATE;

    LOCK();
    me = GC_lookup_thread(self);
    UNLOCK();
    return me != NULL && !(me -> flags & FINISHED);
}

static pthread_t main_pthread_id;
static void *main_stack, *main_altstack;
static word main_stack_size, main_altstack_size;

GC_API void GC_CALL GC_register_altstack(void *stack, GC_word stack_size,
                                         void *altstack,
                                         GC_word altstack_size)
{
  GC_thread me;
  pthread_t self = pthread_self();
  DCL_LOCK_STATE;

  LOCK();
  me = GC_lookup_thread(self);
  if (me != NULL) {
    me->stack = (ptr_t)stack;
    me->stack_size = stack_size;
    me->altstack = (ptr_t)altstack;
    me->altstack_size = altstack_size;
  } else {
    /* This happens if we are called before GC_thr_init.    */
    main_pthread_id = self;
    main_stack = stack;
    main_stack_size = stack_size;
    main_altstack = altstack;
    main_altstack_size = altstack_size;
  }
  UNLOCK();
}

#ifdef CAN_HANDLE_FORK

  /* Prevent TSan false positive about the race during items removal    */
  /* from GC_threads.  (The race cannot happen since only one thread    */
  /* survives in the child.)                                            */
# ifdef CAN_CALL_ATFORK
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void store_to_threads_table(int hv, GC_thread me)
  {
    GC_threads[hv] = me;
  }

/* Remove all entries from the GC_threads table, except the     */
/* one for the current thread.  We need to do this in the child */
/* process after a fork(), since only the current thread        */
/* survives in the child.                                       */
STATIC void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      GC_thread p, next;
      GC_thread me = NULL;

      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> next;
        if (THREAD_EQUAL(p -> id, self)
            && me == NULL) { /* ignore dead threads with the same id */
          me = p;
          p -> next = 0;
#         ifdef GC_DARWIN_THREADS
            /* Update thread Id after fork (it is OK to call    */
            /* GC_destroy_thread_local and GC_free_inner        */
            /* before update).                                  */
            me -> stop_info.mach_thread = mach_thread_self();
#         endif
#         ifdef USE_TKILL_ON_ANDROID
            me -> kernel_id = gettid();
#         endif
#         if defined(THREAD_LOCAL_ALLOC) && !defined(USE_CUSTOM_SPECIFIC)
          {
            int res;

            /* Some TLS implementations might be not fork-friendly, so  */
            /* we re-assign thread-local pointer to 'tlfs' for safety   */
            /* instead of the assertion check (again, it is OK to call  */
            /* GC_destroy_thread_local and GC_free_inner before).       */
            res = GC_setspecific(GC_thread_key, &me->tlfs);
            if (COVERT_DATAFLOW(res) != 0)
              ABORT("GC_setspecific failed (in child)");
          }
#         endif
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if (!(p -> flags & FINISHED)) {
              /* Cannot call GC_destroy_thread_local here.  The free    */
              /* lists may be in an inconsistent state (as thread p may */
              /* be updating one of the lists by GC_generic_malloc_many */
              /* or GC_FAST_MALLOC_GRANS when fork is invoked).         */
              /* This should not be a problem because the lost elements */
              /* of the free lists will be collected during GC.         */
              GC_remove_specific_after_fork(GC_thread_key, p -> id);
            }
#         endif
          /* TODO: To avoid TSan hang (when updating GC_bytes_freed),   */
          /* we just skip explicit freeing of GC_threads entries.       */
#         if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
            if (p != &first_thread) GC_INTERNAL_FREE(p);
#         endif
        }
      }
      store_to_threads_table(hv, me);
    }
}
#endif /* CAN_HANDLE_FORK */

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
  {
    int i;
    GC_thread p;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers_m1; ++i) {
        if ((word)marker_sp[i] > (word)lo && (word)marker_sp[i] < (word)hi)
          return TRUE;
#       ifdef IA64
          if ((word)marker_bsp[i] > (word)lo
              && (word)marker_bsp[i] < (word)hi)
            return TRUE;
#       endif
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if ((word)p->stack_end >= (word)lo
                && (word)p->stack_end < (word)hi)
              return TRUE;
#         else /* STACK_GROWS_DOWN */
            if ((word)p->stack_end > (word)lo
                && (word)p->stack_end <= (word)hi)
              return TRUE;
#         endif
        }
      }
    }
    return FALSE;
  }
#endif /* USE_PROC_FOR_LIBRARIES */

#if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
    && defined(IA64)
  /* Find the largest stack base smaller than bound.  May be used       */
  /* to find the boundary between a register stack and the adjacent,    */
  /* immediately preceding memory stack.                                */
  GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
  {
    int i;
    GC_thread p;
    ptr_t result = 0;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers_m1; ++i) {
        if ((word)marker_sp[i] > (word)result
            && (word)marker_sp[i] < (word)bound)
          result = marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if ((word)p->stack_end > (word)result
            && (word)p->stack_end < (word)bound) {
          result = p -> stack_end;
        }
      }
    }
    return result;
  }
#endif /* IA64 */

#ifndef STAT_READ
# define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call    */
        /* the real one.                                                */
#endif

#ifdef GC_HPUX_THREADS
# define GC_get_nprocs() pthread_num_processors_np()

#elif defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
      || defined(GC_HAIKU_THREADS) || defined(GC_SOLARIS_THREADS) \
      || defined(HURD) || defined(HOST_ANDROID) || defined(NACL)
  GC_INLINE int GC_get_nprocs(void)
  {
    int nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
    return nprocs > 0 ? nprocs : 1; /* ignore error silently */
  }

#elif defined(GC_IRIX_THREADS)
  GC_INLINE int GC_get_nprocs(void)
  {
    int nprocs = (int)sysconf(_SC_NPROC_ONLN);
    return nprocs > 0 ? nprocs : 1; /* ignore error silently */
  }

#elif defined(GC_LINUX_THREADS) /* && !HOST_ANDROID && !NACL */
  /* Return the number of processors. */
  STATIC int GC_get_nprocs(void)
  {
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that     */
    /* appears to be buggy in many cases.                             */
    /* We look for lines "cpu<n>" in /proc/stat.                      */
#   define PROC_STAT_BUF_SZ ((1 + MAX_MARKERS) * 100) /* should be enough */
    /* No need to read the entire /proc/stat to get maximum cpu<N> as   */
    /* - the requested lines are located at the beginning of the file;  */
    /* - the lines with cpu<N> where N > MAX_MARKERS are not needed.    */
    char stat_buf[PROC_STAT_BUF_SZ+1];
    int f;
    int result, i, len;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0) {
      WARN("Could not open /proc/stat\n", 0);
      return 1; /* assume a uniprocessor */
    }
    len = STAT_READ(f, stat_buf, sizeof(stat_buf)-1);
    /* Unlikely that we need to retry because of an incomplete read here. */
    if (len < 0) {
      WARN("Failed to read /proc/stat, errno= %" WARN_PRIdPTR "\n",
           (signed_word)errno);
      close(f);
      return 1;
    }
    stat_buf[len] = '\0'; /* to avoid potential buffer overrun by atoi() */
    close(f);

    result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."   */
        /* entry in /proc/stat.  We identify those as           */
        /* uniprocessors.                                       */

    for (i = 0; i < len - 4; ++i) {
      if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
          && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
        int cpu_no = atoi(&stat_buf[i + 4]);
        if (cpu_no >= result)
          result = cpu_no + 1;
      }
    }
    return result;
  }
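
  /* For illustration (the counter values are made up): a /proc/stat    */
  /* beginning with                                                     */
  /*    cpu  10132153 290696 3084719 ...                                */
  /*    cpu0 1393280 32966 572056 ...                                   */
  /*    cpu1 1236509 30442 550037 ...                                   */
  /* yields cpu_no values 0 and 1, hence a result of 2.                 */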

#elif defined(GC_DGUX386_THREADS)
  /* Return the number of processors, or a value <= 0 if it cannot be   */
  /* determined.                                                        */
  STATIC int GC_get_nprocs(void)
  {
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0)
       /* set -1 for error */
       numCpus = -1;
    else
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;
    return(numCpus);
  }

#elif defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) \
      || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
  STATIC int GC_get_nprocs(void)
  {
    int mib[] = {CTL_HW,HW_NCPU};
    int res;
    size_t len = sizeof(res);

    sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
    return res;
  }

#else
  /* E.g., GC_RTEMS_PTHREADS */
# define GC_get_nprocs() 1 /* not implemented */
#endif /* !GC_LINUX_THREADS && !GC_DARWIN_THREADS && ... */

#if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
  /* Some buggy Linux/arm kernels show only non-sleeping CPUs in        */
  /* /proc/stat (and /proc/cpuinfo), so another data source is          */
  /* tried first.  Result <= 0 on error.                                */
  STATIC int GC_get_nprocs_present(void)
  {
    char stat_buf[16];
    int f;
    int len;

    f = open("/sys/devices/system/cpu/present", O_RDONLY);
    if (f < 0)
      return -1; /* cannot open the file */

    len = STAT_READ(f, stat_buf, sizeof(stat_buf));
    close(f);

    /* Recognized file format: "0\n" or "0-<max_cpu_id>\n".     */
    /* The file might contain a comma-separated list, but we    */
    /* do not need to handle that case (just silently ignore).  */
    if (len < 2 || stat_buf[0] != '0' || stat_buf[len - 1] != '\n') {
      return 0; /* read error or unrecognized content */
    } else if (len == 2) {
      return 1; /* a uniprocessor */
    } else if (stat_buf[1] != '-') {
      return 0; /* unrecognized content */
    }

    stat_buf[len - 1] = '\0'; /* terminate the string */
    return atoi(&stat_buf[2]) + 1; /* skip "0-" and parse max_cpu_id */
  }
#endif /* ARM32 && GC_LINUX_THREADS && !NACL */
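
/* For GC_get_nprocs_present above: on a quad-core machine the file     */
/* typically contains "0-3\n", so the function returns atoi("3") + 1,   */
/* i.e. 4, while a bare "0\n" denotes a single present CPU.             */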

#if defined(CAN_HANDLE_FORK) && defined(THREAD_SANITIZER)
# include "private/gc_pmark.h" /* for MS_NONE */

  /* Workaround for TSan which does not notice that the GC lock */
  /* is acquired in fork_prepare_proc().                        */
  GC_ATTR_NO_SANITIZE_THREAD
  static GC_bool collection_in_progress(void)
  {
    return GC_mark_state != MS_NONE;
  }
#else
# define collection_in_progress() GC_collection_in_progress()
#endif

/* We hold the GC lock.  Wait until an in-progress GC has finished.     */
/* Repeatedly RELEASES GC LOCK in order to wait.                        */
/* If wait_for_all is true, then we exit with the GC lock held and no   */
/* collection in progress; otherwise we just wait for the current GC    */
/* to finish.                                                           */
STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    DCL_LOCK_STATE;
#   if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
      /* GC_lock_holder is accessed with the lock held, so there is no  */
      /* data race actually (unlike what is reported by TSan).          */
      GC_ASSERT(I_HOLD_LOCK());
#   endif
    ASSERT_CANCEL_DISABLED();
    if (GC_incremental && collection_in_progress()) {
        word old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                */
        while (GC_incremental && collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            ENTER_GC();
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
}

#ifdef CAN_HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that    */
/* this is attainable, since the single UNIX spec seems to imply that   */
/* one should only call async-signal-safe functions, and we probably    */
/* can't quite guarantee that.  But we give it our best shot.  (That    */
/* same spec also implies that it's not safe to call the system malloc  */
/* between fork() and exec().  Thus we're doing no worse than it.)      */

IF_CANCEL(static int fork_cancel_state;)
                                /* protected by allocation lock.        */

# ifdef PARALLEL_MARK
#   ifdef THREAD_SANITIZER
#     if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
        STATIC void GC_generic_lock(pthread_mutex_t *);
#     endif
      GC_ATTR_NO_SANITIZE_THREAD
      static void wait_for_reclaim_atfork(void);
#   else
#     define wait_for_reclaim_atfork() GC_wait_for_reclaim()
#   endif
# endif /* PARALLEL_MARK */

/* Called before a fork().              */
#if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
  /* GC_lock_holder is updated safely (no data race actually).  */
  GC_ATTR_NO_SANITIZE_THREAD
#endif
static void fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks    */
    /* the child will see a consistent state in which monitor           */
    /* invariants hold.  Unfortunately, we can't acquire libc locks     */
    /* we might need, and there seems to be no guarantee that libc      */
    /* must install a suitable fork handler.                            */
    /* Wait for an ongoing GC to finish, since we can't finish it in    */
    /* the (one remaining thread in) the child.                         */
      LOCK();
      DISABLE_CANCEL(fork_cancel_state);
                /* Following waits may include cancellation points. */
#     if defined(PARALLEL_MARK)
        if (GC_parallel)
          wait_for_reclaim_atfork();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK)
        if (GC_parallel) {
#         if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
             && defined(CAN_CALL_ATFORK)
            /* Prevent TSan false positive about the data race  */
            /* when updating GC_mark_lock_holder.               */
            GC_generic_lock(&mark_mutex);
#         else
            GC_acquire_mark_lock();
#         endif
        }
#     endif
      GC_acquire_dirty_lock();
}

/* Called in the parent after a fork() (even if the latter failed).     */
#if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
  GC_ATTR_NO_SANITIZE_THREAD
#endif
static void fork_parent_proc(void)
{
    GC_release_dirty_lock();
#   if defined(PARALLEL_MARK)
      if (GC_parallel) {
#       if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
           && defined(CAN_CALL_ATFORK)
          /* To match that in fork_prepare_proc. */
          (void)pthread_mutex_unlock(&mark_mutex);
#       else
          GC_release_mark_lock();
#       endif
      }
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
}

/* Called in the child after a fork().  */
#if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
  GC_ATTR_NO_SANITIZE_THREAD
#endif
static void fork_child_proc(void)
{
    GC_release_dirty_lock();
#   ifdef PARALLEL_MARK
      if (GC_parallel) {
#       if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
           && defined(CAN_CALL_ATFORK)
          (void)pthread_mutex_unlock(&mark_mutex);
#       else
          GC_release_mark_lock();
#       endif
        /* Turn off parallel marking in the child, since we are probably  */
        /* just going to exec, and we would have to restart mark threads. */
        GC_parallel = FALSE;
      }
#     ifdef THREAD_SANITIZER
        /* TSan does not support thread creation in the child process. */
        available_markers_m1 = 0;
#     endif
#   endif
    /* Clean up the thread table, so that just our thread is left.      */
    GC_remove_all_threads_but_me();
#   ifndef GC_DISABLE_INCREMENTAL
      GC_dirty_update_child();
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
    /* Even though after a fork the child only inherits the single      */
    /* thread that called the fork(), if another thread in the parent   */
    /* was attempting to lock the mutex while it was being held in      */
    /* fork_prepare_proc(), the mutex will be left in an inconsistent   */
    /* state in the child after the UNLOCK.  This is the case, at       */
    /* least, on Mac OS X, and leads to an unusable GC in the child,    */
    /* which will block when attempting to perform any GC operation     */
    /* that acquires the allocation mutex.                              */
#   ifdef USE_PTHREAD_LOCKS
      GC_ASSERT(I_DONT_HOLD_LOCK());
      /* Reinitialize the mutex.  It should be safe since we are        */
      /* running this in the child which only inherits a single thread. */
      /* mutex_destroy() may return EBUSY, which makes no sense, but    */
      /* that is the reason for the need of the reinitialization.       */
      (void)pthread_mutex_destroy(&GC_allocate_ml);
      /* TODO: Probably some targets might need the default mutex       */
      /* attribute to be passed instead of NULL.                        */
      if (0 != pthread_mutex_init(&GC_allocate_ml, NULL))
        ABORT("pthread_mutex_init failed (in child)");
#   endif
}

  /* Routines for fork handling by client (no-op if pthread_atfork works). */
  GC_API void GC_CALL GC_atfork_prepare(void)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
#   if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
      if (GC_auto_incremental) {
        GC_ASSERT(0 == GC_handle_fork);
        ABORT("Unable to fork while mprotect_thread is running");
      }
#   endif
    if (GC_handle_fork <= 0)
      fork_prepare_proc();
  }

  GC_API void GC_CALL GC_atfork_parent(void)
  {
    if (GC_handle_fork <= 0)
      fork_parent_proc();
  }

  GC_API void GC_CALL GC_atfork_child(void)
  {
    if (GC_handle_fork <= 0)
      fork_child_proc();
  }
#endif /* CAN_HANDLE_FORK */
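
/* Thus a client on a platform where the automatic pthread_atfork()     */
/* handlers are not in effect (GC_handle_fork <= 0) would bracket       */
/* fork() itself, roughly as in this illustrative sketch:               */
/*     GC_atfork_prepare();                                             */
/*     pid = fork();                                                    */
/*     if (0 == pid) GC_atfork_child(); else GC_atfork_parent();        */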
1293

1294
#ifdef INCLUDE_LINUX_THREAD_DESCR
1295
  __thread int GC_dummy_thread_local;
1296
#endif
1297

1298
#ifdef PARALLEL_MARK
1299
  static void setup_mark_lock(void);
1300

1301
  static unsigned required_markers_cnt = 0;
1302
                        /* The default value (0) means the number of    */
1303
                        /* markers should be selected automatically.    */
1304
#endif /* PARALLEL_MARK */
1305

1306
GC_API void GC_CALL GC_set_markers_count(unsigned markers GC_ATTR_UNUSED)
2✔
1307
{
1308
# ifdef PARALLEL_MARK
1309
    required_markers_cnt = markers < MAX_MARKERS ? markers : MAX_MARKERS;
2✔
1310
# endif
1311
}
2✔
1312

1313
#ifndef DONT_USE_ATEXIT
1314
  STATIC pthread_t GC_main_thread_id;
1315

1316
  GC_INNER GC_bool GC_is_main_thread(void)
4✔
1317
  {
1318
    GC_ASSERT(GC_thr_initialized);
4✔
1319
    return THREAD_EQUAL(GC_main_thread_id, pthread_self());
4✔
1320
  }
1321
#endif /* !DONT_USE_ATEXIT */
1322

1323
GC_INNER void GC_thr_init(void)
31✔
1324
{
  GC_ASSERT(I_HOLD_LOCK());
  if (GC_thr_initialized) return;
  GC_thr_initialized = TRUE;

  GC_ASSERT((word)&GC_threads % sizeof(word) == 0);
# ifdef CAN_HANDLE_FORK
    /* Prepare for forks if requested.  */
    if (GC_handle_fork) {
#     ifdef CAN_CALL_ATFORK
        if (pthread_atfork(fork_prepare_proc, fork_parent_proc,
                           fork_child_proc) == 0) {
          /* Handlers successfully registered.  */
          GC_handle_fork = 1;
        } else
#     endif
      /* else */ if (GC_handle_fork != -1)
        ABORT("pthread_atfork failed");
    }
# endif
# ifdef INCLUDE_LINUX_THREAD_DESCR
    /* Explicitly register the region including the address     */
    /* of a thread local variable.  This should include thread  */
    /* locals for the main thread, except for those allocated   */
    /* in response to dlopen calls.                             */
    {
      ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local);
      ptr_t main_thread_start, main_thread_end;
      if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
                                &main_thread_end)) {
        ABORT("Failed to find mapping for the main thread's thread locals");
      } else {
        /* main_thread_start and main_thread_end are initialized.       */
        GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
      }
    }
# endif
  /* Add the initial thread, so we can stop it. */
  {
    pthread_t self = pthread_self();
    GC_thread t = GC_new_thread(self);

    if (t == NULL)
      ABORT("Failed to allocate memory for the initial thread");
#   ifdef GC_DARWIN_THREADS
      t -> stop_info.mach_thread = mach_thread_self();
#   else
      t -> stop_info.stack_ptr = GC_approx_sp();
#   endif
#   ifndef DONT_USE_ATEXIT
      GC_main_thread_id = self;
#   endif
    t -> flags = DETACHED | MAIN_THREAD;
    if (THREAD_EQUAL(self, main_pthread_id)) {
      t -> stack = (ptr_t)main_stack;
      t -> stack_size = main_stack_size;
      t -> altstack = (ptr_t)main_altstack;
      t -> altstack_size = main_altstack_size;
    }
  }

  /* Set GC_nprocs and available_markers_m1.    */
  {
    char * nprocs_string = GETENV("GC_NPROCS");
    GC_nprocs = -1;
    if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
  }
  if (GC_nprocs <= 0
#     if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
        && (GC_nprocs = GC_get_nprocs_present()) <= 1
                                /* Workaround for some Linux/arm kernels */
#     endif
      )
  {
    GC_nprocs = GC_get_nprocs();
  }
  if (GC_nprocs <= 0) {
    WARN("GC_get_nprocs() returned %" WARN_PRIdPTR "\n",
         (signed_word)GC_nprocs);
    GC_nprocs = 2; /* assume dual-core */
#   ifdef PARALLEL_MARK
      available_markers_m1 = 0; /* but use only one marker */
#   endif
  } else {
#   ifdef PARALLEL_MARK
      {
        char * markers_string = GETENV("GC_MARKERS");
        int markers = required_markers_cnt;

        if (markers_string != NULL) {
          markers = atoi(markers_string);
          if (markers <= 0 || markers > MAX_MARKERS) {
            WARN("Too big or invalid number of mark threads: %" WARN_PRIdPTR
                 "; using maximum threads\n", (signed_word)markers);
            markers = MAX_MARKERS;
          }
        } else if (0 == markers) {
          /* Unless the client sets the desired number of       */
          /* parallel markers, it is determined based on the    */
          /* number of CPU cores.                               */
          markers = GC_nprocs;
#         if defined(GC_MIN_MARKERS) && !defined(CPPCHECK)
            /* This is primarily for targets without getenv().  */
            if (markers < GC_MIN_MARKERS)
              markers = GC_MIN_MARKERS;
#         endif
          if (markers > MAX_MARKERS)
            markers = MAX_MARKERS; /* silently limit the value */
        }
        available_markers_m1 = markers - 1;
      }
#   endif
  }
  GC_COND_LOG_PRINTF("Number of processors: %d\n", GC_nprocs);

# if defined(BASE_ATOMIC_OPS_EMULATED) && !defined(GC_DARWIN_THREADS) \
     && !defined(GC_OPENBSD_UTHREADS) && !defined(NACL) \
     && !defined(PLATFORM_STOP_WORLD) && !defined(SN_TARGET_PSP2)
    /* Ensure the process is running on just one CPU core.      */
    /* This is needed because the AO primitives emulated with   */
    /* locks cannot be used inside signal handlers.             */
    {
      cpu_set_t mask;
      int cpu_set_cnt = 0;
      int cpu_lowest_set = 0;
      int i = GC_nprocs > 1 ? GC_nprocs : 2; /* check at least 2 cores */

      if (sched_getaffinity(0 /* current process */,
                            sizeof(mask), &mask) == -1)
        ABORT_ARG1("sched_getaffinity failed", ": errno= %d", errno);
      while (i-- > 0)
        if (CPU_ISSET(i, &mask)) {
          cpu_lowest_set = i;
          cpu_set_cnt++;
        }
      if (0 == cpu_set_cnt)
        ABORT("sched_getaffinity returned empty mask");
      if (cpu_set_cnt > 1) {
        CPU_ZERO(&mask);
        CPU_SET(cpu_lowest_set, &mask); /* select just one CPU */
        if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
          ABORT_ARG1("sched_setaffinity failed", ": errno= %d", errno);
        WARN("CPU affinity mask is set to %p\n", (word)1 << cpu_lowest_set);
      }
    }
# endif /* BASE_ATOMIC_OPS_EMULATED */

# ifndef GC_DARWIN_THREADS
    GC_stop_init();
# endif

# ifdef PARALLEL_MARK
    if (available_markers_m1 <= 0) {
      /* Disable parallel marking.      */
      GC_parallel = FALSE;
      GC_COND_LOG_PRINTF(
                "Single marker thread, turning off parallel marking\n");
    } else {
      setup_mark_lock();
    }
# endif
}
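
/*
 * Illustrative client sketch (not part of this file): the GC_NPROCS and
 * GC_MARKERS environment variables read above take effect only if they
 * are set before the collector initializes.  A minimal, hedged example,
 * assuming POSIX setenv() and a client that wants at most two mark
 * threads:
 *
 *   #define GC_THREADS
 *   #include "gc.h"
 *   #include <stdlib.h>
 *
 *   int main(void)
 *   {
 *     setenv("GC_MARKERS", "2", 1);  // cap the number of parallel markers
 *     setenv("GC_NPROCS", "4", 1);   // number of CPUs the GC should assume
 *     GC_INIT();                     // GC_thr_init() reads both variables
 *     return 0;
 *   }
 */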

/* Perform all initializations, including those that    */
/* may require allocation.                              */
/* Called without allocation lock.                      */
/* Must be called before a second thread is created.    */
/* Did we say it's called without the allocation lock?  */
GC_INNER void GC_init_parallel(void)
{
#   if defined(THREAD_LOCAL_ALLOC)
      DCL_LOCK_STATE;
#   endif
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first.      */
    if (!GC_is_initialized) GC_init();
    /* Initialize thread local free lists if used.      */
#   if defined(THREAD_LOCAL_ALLOC)
      LOCK();
      GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs));
      UNLOCK();
#   endif
}

#ifndef GC_NO_PTHREAD_SIGMASK
  GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
                                        sigset_t *oset)
  {
    sigset_t fudged_set;

    INIT_REAL_SYMS();
    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        int sig_suspend = GC_get_suspend_signal();

        fudged_set = *set;
        GC_ASSERT(sig_suspend >= 0);
        if (sigdelset(&fudged_set, sig_suspend) != 0)
            ABORT("sigdelset failed");
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
  }
#endif /* !GC_NO_PTHREAD_SIGMASK */
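
/*
 * Illustrative client sketch (not part of this file): the wrapper above
 * quietly keeps the collector's suspend signal deliverable even when a
 * thread tries to block all signals; otherwise stopping the world could
 * deadlock.  A hedged example, assuming gc.h was included with GC_THREADS
 * defined so that pthread_sigmask() is redirected to this wrapper:
 *
 *   sigset_t all;
 *
 *   sigfillset(&all);
 *   if (pthread_sigmask(SIG_BLOCK, &all, NULL) != 0) abort();
 *   // The wrapped call removed GC_get_suspend_signal() from the set
 *   // before handing it to the real pthread_sigmask().
 */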

static GC_bool do_blocking_enter(GC_thread me)
{
#   if defined(SPARC) || defined(IA64)
        ptr_t stack_ptr = GC_save_regs_in_stack();
        /* TODO: regs saving already done by GC_with_callee_saves_pushed */
#   elif defined(E2K)
        size_t stack_size;
#   endif
    GC_bool topOfStackUnset = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stop_info.stack_ptr = stack_ptr;
#   else
        me -> stop_info.stack_ptr = GC_approx_sp();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
        if (me -> topOfStack == NULL) {
            /* GC_do_blocking_inner is not called recursively,  */
            /* so topOfStack should be computed now.            */
            topOfStackUnset = TRUE;
            me -> topOfStack = GC_FindTopOfStack(0);
        }
#   endif
#   ifdef IA64
        me -> backing_store_ptr = stack_ptr;
#   elif defined(E2K)
        GC_ASSERT(NULL == me -> backing_store_end);
        stack_size = GC_alloc_and_get_procedure_stack(&me->backing_store_end);
        me->backing_store_ptr = me->backing_store_end + stack_size;
#   endif
    me -> thread_blocked = (unsigned char)TRUE;
    /* Save context here if we want to support precise stack marking */
    return topOfStackUnset;
}

static void do_blocking_leave(GC_thread me, GC_bool topOfStackUnset)
{
    GC_ASSERT(I_HOLD_LOCK());
#   if defined(CPPCHECK)
      GC_noop1((word)&me->thread_blocked);
#   endif
    me -> thread_blocked = FALSE;
#   ifdef E2K
        GC_ASSERT(me -> backing_store_end != NULL);
         /* Note that the value of me->backing_store_end here may       */
         /* differ from the one stored by do_blocking_enter().          */
        GC_INTERNAL_FREE(me -> backing_store_end);
        me -> backing_store_ptr = NULL;
        me -> backing_store_end = NULL;
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
        if (topOfStackUnset)
            me -> topOfStack = NULL; /* make topOfStack unset again */
#   else
        (void)topOfStackUnset;
#   endif
}

/* Wrapper for functions that are likely to block for an appreciable    */
/* length of time.                                                      */
GC_INNER void GC_do_blocking_inner(ptr_t data, void * context GC_ATTR_UNUSED)
{
    struct blocking_data *d = (struct blocking_data *)data;
    GC_thread me;
    GC_bool topOfStackUnset;
    DCL_LOCK_STATE;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    topOfStackUnset = do_blocking_enter(me);
    UNLOCK();

    d -> client_data = (d -> fn)(d -> client_data);

    LOCK();   /* This will block if the world is stopped.       */
#   ifdef LINT2
      {
#        ifdef GC_ASSERTIONS
           GC_thread saved_me = me;
#        endif

         /* The pointer to the GC thread descriptor should not be   */
         /* changed while the thread is registered but a static     */
         /* analysis tool might complain that this pointer value    */
         /* (obtained in the first locked section) is unreliable in */
         /* the second locked section.                              */
         me = GC_lookup_thread(pthread_self());
         GC_ASSERT(me == saved_me);
      }
#   endif
#   if defined(GC_ENABLE_SUSPEND_THREAD) && !defined(GC_DARWIN_THREADS) \
       && !defined(GC_OPENBSD_UTHREADS) && !defined(NACL) \
       && !defined(PLATFORM_STOP_WORLD) && !defined(SN_TARGET_PSP2)
      /* Note: this code cannot be moved into do_blocking_leave()   */
      /* otherwise there could be a static analysis tool warning    */
      /* (false positive) about unlock without a matching lock.     */
      while (EXPECT((me -> stop_info.ext_suspend_cnt & 1) != 0, FALSE)) {
        word suspend_cnt = (word)(me -> stop_info.ext_suspend_cnt);
                        /* read suspend counter (number) before unlocking */

        UNLOCK();
        GC_suspend_self_inner(me, suspend_cnt);
        LOCK();
      }
#   endif
    do_blocking_leave(me, topOfStackUnset);
    UNLOCK();
}
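
/*
 * Illustrative client sketch (not part of this file): GC_do_blocking() is
 * the public entry point that leads to GC_do_blocking_inner() above.  A
 * registered thread wraps a potentially long blocking call so that the
 * collector need not wait for it; the callback must not allocate from or
 * otherwise touch the GC heap.  blocking_read() and struct read_req are
 * hypothetical names used only for this example.
 *
 *   struct read_req { int fd; void *buf; size_t len; ssize_t res; };
 *
 *   static void *blocking_read(void *p)
 *   {
 *     struct read_req *r = (struct read_req *)p;
 *
 *     r->res = read(r->fd, r->buf, r->len);  // may block indefinitely
 *     return p;                              // no GC heap access here
 *   }
 *
 *   static ssize_t gc_friendly_read(int fd, void *buf, size_t len)
 *   {
 *     struct read_req req;
 *
 *     req.fd = fd; req.buf = buf; req.len = len; req.res = 0;
 *     GC_do_blocking(blocking_read, &req);   // a GC may run while we wait
 *     return req.res;
 *   }
 */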

#if defined(GC_ENABLE_SUSPEND_THREAD) && !defined(GC_DARWIN_THREADS) \
    && !defined(GC_OPENBSD_UTHREADS) && !defined(NACL) \
    && !defined(PLATFORM_STOP_WORLD) && !defined(SN_TARGET_PSP2)
  /* Similar to GC_do_blocking_inner() but assuming the GC lock is held */
  /* and fn is GC_suspend_self_inner.                                   */
  GC_INNER void GC_suspend_self_blocked(ptr_t thread_me,
                                        void * context GC_ATTR_UNUSED)
  {
    GC_thread me = (GC_thread)thread_me;
    GC_bool topOfStackUnset;
    DCL_LOCK_STATE;

    GC_ASSERT(I_HOLD_LOCK());
    topOfStackUnset = do_blocking_enter(me);
    while ((me -> stop_info.ext_suspend_cnt & 1) != 0) {
      word suspend_cnt = (word)(me -> stop_info.ext_suspend_cnt);

      UNLOCK();
      GC_suspend_self_inner(me, suspend_cnt);
      LOCK();
    }
    do_blocking_leave(me, topOfStackUnset);
  }
#endif

GC_API void GC_CALL GC_set_stackbottom(void *gc_thread_handle,
                                       const struct GC_stack_base *sb)
{
    GC_thread t = (GC_thread)gc_thread_handle;

    GC_ASSERT(sb -> mem_base != NULL);
    if (!EXPECT(GC_is_initialized, TRUE)) {
        GC_ASSERT(NULL == t);
    } else {
        GC_ASSERT(I_HOLD_LOCK());
        if (NULL == t) /* current thread? */
            t = GC_lookup_thread(pthread_self());
        GC_ASSERT((t -> flags & FINISHED) == 0);
        GC_ASSERT(!(t -> thread_blocked)
                  && NULL == t -> traced_stack_sect); /* for now */

        if ((t -> flags & MAIN_THREAD) == 0) {
            t -> stack_end = (ptr_t)sb->mem_base;
#           ifdef IA64
                t -> backing_store_end = (ptr_t)sb->reg_base;
#           endif
            return;
        }
        /* Otherwise alter the stack bottom of the primordial thread.   */
    }

    GC_stackbottom = (char*)sb->mem_base;
#   ifdef IA64
        GC_register_stackbottom = (ptr_t)sb->reg_base;
#   endif
}

GC_API void * GC_CALL GC_get_my_stackbottom(struct GC_stack_base *sb)
{
    pthread_t self = pthread_self();
    GC_thread me;
    DCL_LOCK_STATE;

    LOCK();
    me = GC_lookup_thread(self);
    /* The thread is assumed to be registered.  */
    if ((me -> flags & MAIN_THREAD) == 0) {
        sb -> mem_base = me -> stack_end;
#       ifdef IA64
            sb -> reg_base = me -> backing_store_end;
#       elif defined(E2K)
            sb -> reg_base = NULL;
#       endif
    } else {
        sb -> mem_base = GC_stackbottom;
#       ifdef IA64
            sb -> reg_base = GC_register_stackbottom;
#       elif defined(E2K)
            sb -> reg_base = NULL;
#       endif
    }
    UNLOCK();
    return (void *)me; /* gc_thread_handle */
}
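
/*
 * Illustrative client sketch (not part of this file): together with
 * GC_get_my_stackbottom(), GC_set_stackbottom() lets a client that
 * switches stacks (a coroutine library, say) tell the collector which
 * stack to scan.  Note the assertion above: once the collector is
 * initialized, the call is expected to run under the allocator lock,
 * e.g. from a callback passed to GC_call_with_alloc_lock().  The
 * co_stack_bottom name below is hypothetical.
 *
 *   struct GC_stack_base main_sb, co_sb;
 *   void *self = GC_get_my_stackbottom(&main_sb); // handle + current bottom
 *
 *   co_sb.mem_base = co_stack_bottom;   // cold end of the coroutine stack
 *   GC_set_stackbottom(self, &co_sb);   // before switching to that stack
 *   // ... run on the coroutine stack ...
 *   GC_set_stackbottom(self, &main_sb); // restore after switching back
 */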

/* GC_call_with_gc_active() has the opposite functionality to           */
/* GC_do_blocking().  It may be called from a user function invoked by  */
/* GC_do_blocking() to temporarily re-enable calling GC functions       */
/* and/or manipulating pointers to the garbage-collected heap.          */
GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
                                             void * client_data)
{
    struct GC_traced_stack_sect_s stacksect;
    pthread_t self = pthread_self();
    GC_thread me;
#   ifdef E2K
      size_t stack_size;
#   endif
    DCL_LOCK_STATE;

    LOCK();   /* This will block if the world is stopped.       */
    me = GC_lookup_thread(self);

    /* Adjust our stack bottom value (this could happen unless  */
    /* GC_get_stack_base() was used which returned GC_SUCCESS). */
    if ((me -> flags & MAIN_THREAD) == 0) {
      GC_ASSERT(me -> stack_end != NULL);
      if ((word)me->stack_end HOTTER_THAN (word)(&stacksect))
        me -> stack_end = (ptr_t)(&stacksect);
    } else {
      /* The original stack. */
      if ((word)GC_stackbottom HOTTER_THAN (word)(&stacksect))
        GC_stackbottom = (ptr_t)COVERT_DATAFLOW(&stacksect);
    }

    if (!me->thread_blocked) {
      /* We are not inside GC_do_blocking() - do nothing more.  */
      UNLOCK();
      client_data = fn(client_data);
      /* Prevent treating the above as a tail call.     */
      GC_noop1(COVERT_DATAFLOW(&stacksect));
      return client_data; /* result */
    }

#   if defined(GC_ENABLE_SUSPEND_THREAD) && !defined(GC_DARWIN_THREADS) \
       && !defined(GC_OPENBSD_UTHREADS) && !defined(NACL) \
       && !defined(PLATFORM_STOP_WORLD) && !defined(SN_TARGET_PSP2)
      while (EXPECT((me -> stop_info.ext_suspend_cnt & 1) != 0, FALSE)) {
        word suspend_cnt = (word)(me -> stop_info.ext_suspend_cnt);
        UNLOCK();
        GC_suspend_self_inner(me, suspend_cnt);
        LOCK();
      }
#   endif

    /* Setup new "stack section".       */
    stacksect.saved_stack_ptr = me -> stop_info.stack_ptr;
#   ifdef IA64
      /* This is the same as in GC_call_with_stack_base().      */
      stacksect.backing_store_end = GC_save_regs_in_stack();
      /* Unnecessarily flushes register stack,          */
      /* but that probably doesn't hurt.                */
      stacksect.saved_backing_store_ptr = me -> backing_store_ptr;
#   elif defined(E2K)
      GC_ASSERT(me -> backing_store_end != NULL);
      GC_INTERNAL_FREE(me -> backing_store_end);
      me -> backing_store_ptr = NULL;
      me -> backing_store_end = NULL;
#   endif
    stacksect.prev = me -> traced_stack_sect;
    me -> thread_blocked = FALSE;
    me -> traced_stack_sect = &stacksect;

    UNLOCK();
    client_data = fn(client_data);
    GC_ASSERT(me -> thread_blocked == FALSE);
    GC_ASSERT(me -> traced_stack_sect == &stacksect);

    /* Restore original "stack section".        */
#   if defined(CPPCHECK)
      GC_noop1((word)me->traced_stack_sect);
#   endif
#   ifdef E2K
      (void)GC_save_regs_in_stack();
#   endif
    LOCK();
    me -> traced_stack_sect = stacksect.prev;
#   ifdef IA64
      me -> backing_store_ptr = stacksect.saved_backing_store_ptr;
#   elif defined(E2K)
      GC_ASSERT(NULL == me -> backing_store_end);
      stack_size = GC_alloc_and_get_procedure_stack(&me->backing_store_end);
      me->backing_store_ptr = me->backing_store_end + stack_size;
#   endif
    me -> thread_blocked = (unsigned char)TRUE;
    me -> stop_info.stack_ptr = stacksect.saved_stack_ptr;
    UNLOCK();

    return client_data; /* result */
}
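
/*
 * Illustrative client sketch (not part of this file): the typical pairing
 * with GC_do_blocking().  While a thread sits in a blocked region, it can
 * re-enter the collector-visible state for a bounded piece of work.  The
 * function names and the `shared' root below are hypothetical.
 *
 *   static void *shared;           // static data is scanned as a root
 *
 *   static void *touch_heap(void *arg)
 *   {
 *     shared = GC_MALLOC(64);      // GC heap access is legal again here
 *     return NULL;
 *   }
 *
 *   static void *mostly_blocked(void *arg)
 *   {
 *     // ... long-running work that never touches the GC heap ...
 *     GC_call_with_gc_active(touch_heap, NULL);
 *     // ... more non-GC work ...
 *     return NULL;
 *   }
 *
 *   // elsewhere: GC_do_blocking(mostly_blocked, NULL);
 */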

STATIC void GC_unregister_my_thread_inner(GC_thread me)
{
    GC_ASSERT(I_HOLD_LOCK());
#   ifdef DEBUG_THREADS
      GC_log_printf(
                "Unregistering thread %p, gc_thread= %p, n_threads= %d\n",
                (void *)me->id, (void *)me, GC_count_threads());
#   endif
    GC_ASSERT(!(me -> flags & FINISHED));
#   if defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(GC_getspecific(GC_thread_key) == &me->tlfs);
      GC_destroy_thread_local(&(me->tlfs));
#   endif
#   ifdef NACL
      GC_nacl_shutdown_gc_thread();
      GC_nacl_gc_thread_self = NULL;
#   endif
#   if defined(GC_HAVE_PTHREAD_EXIT) || !defined(GC_NO_PTHREAD_CANCEL)
      /* Handle DISABLED_GC flag which is set by the    */
      /* intercepted pthread_cancel or pthread_exit.    */
      if ((me -> flags & DISABLED_GC) != 0) {
        GC_dont_gc--;
      }
#   endif
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC)
      /* It is required to call remove_specific defined in specific.c. */
      GC_remove_specific(GC_thread_key);
#   endif
}

GC_API int GC_CALL GC_unregister_my_thread(void)
{
    pthread_t self = pthread_self();
    GC_thread me;
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

    LOCK();
    DISABLE_CANCEL(cancel_state);
    /* Wait for any GC that may be marking from our stack to    */
    /* complete before we remove this thread.                   */
    GC_wait_for_gc_completion(FALSE);
    me = GC_lookup_thread(self);
#   ifdef DEBUG_THREADS
        GC_log_printf(
                "Called GC_unregister_my_thread on %p, gc_thread= %p\n",
                (void *)self, (void *)me);
#   endif
    GC_ASSERT(THREAD_EQUAL(me->id, self));
    GC_unregister_my_thread_inner(me);
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    return GC_SUCCESS;
}

/* Called at thread exit.                               */
/* Never called for the main thread.  That's OK, since  */
/* it results in at most a tiny one-time leak.  And     */
/* linuxthreads doesn't reclaim the main thread's       */
/* resources or id anyway.                              */
GC_INNER_PTHRSTART void GC_thread_exit_proc(void *arg)
{
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

#   ifdef DEBUG_THREADS
        GC_log_printf("Called GC_thread_exit_proc on %p, gc_thread= %p\n",
                      (void *)((GC_thread)arg)->id, arg);
#   endif
    LOCK();
    DISABLE_CANCEL(cancel_state);
    GC_wait_for_gc_completion(FALSE);
    GC_unregister_my_thread_inner((GC_thread)arg);
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
}

#if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
  GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
  {
    int result;
    GC_thread t;
    DCL_LOCK_STATE;

    INIT_REAL_SYMS();
    LOCK();
    t = (GC_thread)COVERT_DATAFLOW(GC_lookup_thread(thread));
    /* This is guaranteed to be the intended one, since the thread id   */
    /* can't have been recycled by pthreads.                            */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined(GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages, thus I have taken the liberty to catch this one
       spurious return value properly conditionalized on GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled.           */
        /* Delete the thread from GC_threads (unless it has been        */
        /* registered again from the client thread key destructor).     */
        if ((t -> flags & FINISHED) != 0)
          GC_delete_gc_thread(t);
        UNLOCK();
    }
    return result;
  }

  GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread)
  {
    int result;
    GC_thread t;
    DCL_LOCK_STATE;

    INIT_REAL_SYMS();
    LOCK();
    t = (GC_thread)COVERT_DATAFLOW(GC_lookup_thread(thread));
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      t -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if ((t -> flags & FINISHED) != 0) {
        GC_delete_gc_thread(t);
      }
      UNLOCK();
    }
    return result;
  }
#endif /* !SN_TARGET_ORBIS && !SN_TARGET_PSP2 */

#ifndef GC_NO_PTHREAD_CANCEL
  /* We should deal with the fact that, apparently on Solaris and,      */
  /* probably, on some Linux versions, we can't collect while a thread  */
  /* is exiting, since signals aren't handled properly.  This currently */
  /* gives rise to deadlocks.  The only workaround seen is to intercept */
  /* pthread_cancel() and pthread_exit(), and disable collections       */
  /* until the thread exit handler is called.  That's ugly, because we  */
  /* risk growing the heap unnecessarily.  But it seems that we don't   */
  /* really have an option, in that the process is not in a fully       */
  /* functional state while a thread is exiting.                        */
  GC_API int WRAP_FUNC(pthread_cancel)(pthread_t thread)
  {
#   ifdef CANCEL_SAFE
      GC_thread t;
      DCL_LOCK_STATE;
#   endif

    INIT_REAL_SYMS();
#   ifdef CANCEL_SAFE
      LOCK();
      t = GC_lookup_thread(thread);
      /* We test DISABLED_GC because pthread_exit could be called at    */
      /* the same time.  (If t is NULL then pthread_cancel should       */
      /* return ESRCH.)                                                 */
      if (t != NULL && (t -> flags & DISABLED_GC) == 0) {
        t -> flags |= DISABLED_GC;
        GC_dont_gc++;
      }
      UNLOCK();
#   endif
    return REAL_FUNC(pthread_cancel)(thread);
  }
#endif /* !GC_NO_PTHREAD_CANCEL */

#ifdef GC_HAVE_PTHREAD_EXIT
  GC_API GC_PTHREAD_EXIT_ATTRIBUTE void WRAP_FUNC(pthread_exit)(void *retval)
  {
    pthread_t self = pthread_self();
    GC_thread me;
    DCL_LOCK_STATE;

    INIT_REAL_SYMS();
    LOCK();
    me = GC_lookup_thread(self);
    /* We test DISABLED_GC because someone else could call    */
    /* pthread_cancel at the same time.                       */
    if (me != 0 && (me -> flags & DISABLED_GC) == 0) {
      me -> flags |= DISABLED_GC;
      GC_dont_gc++;
    }
    UNLOCK();

    REAL_FUNC(pthread_exit)(retval);
  }
#endif /* GC_HAVE_PTHREAD_EXIT */

GC_INNER GC_bool GC_in_thread_creation = FALSE;
                                /* Protected by allocation lock. */

GC_INLINE void GC_record_stack_base(GC_thread me,
                                    const struct GC_stack_base *sb)
{
#   ifndef GC_DARWIN_THREADS
      me -> stop_info.stack_ptr = (ptr_t)sb->mem_base;
#   endif
    me -> stack_end = (ptr_t)sb->mem_base;
    if (me -> stack_end == NULL)
      ABORT("Bad stack base in GC_register_my_thread");
#   ifdef IA64
      me -> backing_store_end = (ptr_t)sb->reg_base;
#   endif
}

STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
                                             pthread_t my_pthread)
{
    GC_thread me;

    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
    if (me == 0)
      ABORT("Failed to allocate memory for thread registering");
#   ifdef GC_DARWIN_THREADS
      me -> stop_info.mach_thread = mach_thread_self();
#   endif
    GC_record_stack_base(me, sb);
#   ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
      /* Since this could be executed from a detached thread    */
      /* destructor, our signals might already be blocked.      */
      GC_unblock_gc_signals();
#   endif
    return me;
}

GC_API void GC_CALL GC_allow_register_threads(void)
{
# ifdef GC_ASSERTIONS
    DCL_LOCK_STATE;

    /* Check GC is initialized and the current thread is registered. */
    LOCK(); /* just to match that in win32_threads.c */
    GC_ASSERT(GC_lookup_thread(pthread_self()) != 0);
    UNLOCK();
# endif
  INIT_REAL_SYMS(); /* to initialize symbols while single-threaded */
  GC_start_mark_threads();
  set_need_to_lock();
}

GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
{
    pthread_t self = pthread_self();
    GC_thread me;
    DCL_LOCK_STATE;

    if (GC_need_to_lock == FALSE)
        ABORT("Explicit thread registering was not previously enabled");

    LOCK();
    me = GC_lookup_thread(self);
    if (0 == me) {
        me = GC_register_my_thread_inner(sb, self);
#       if defined(CPPCHECK)
          GC_noop1(me->flags);
#       endif
        me -> flags |= DETACHED;
          /* Treat as detached, since we do not need to worry about     */
          /* pointer results.                                           */
#       if defined(THREAD_LOCAL_ALLOC)
          GC_init_thread_local(&(me->tlfs));
#       endif
        UNLOCK();
        return GC_SUCCESS;
    } else if ((me -> flags & FINISHED) != 0) {
        /* This code is executed when a thread is registered from the   */
        /* client thread key destructor.                                */
#       ifdef NACL
          GC_nacl_gc_thread_self = me;
          GC_nacl_initialize_gc_thread();
#       endif
#       ifdef GC_DARWIN_THREADS
          /* Reinitialize mach_thread to avoid thread_suspend failing   */
          /* with MACH_SEND_INVALID_DEST error.                         */
          me -> stop_info.mach_thread = mach_thread_self();
#       endif
        GC_record_stack_base(me, sb);
        me -> flags &= ~FINISHED; /* but not DETACHED */
#       ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
          /* Since this could be executed from a thread destructor,     */
          /* our signals might be blocked.                              */
          GC_unblock_gc_signals();
#       endif
#       if defined(THREAD_LOCAL_ALLOC)
          GC_init_thread_local(&(me->tlfs));
#       endif
#       if defined(GC_ENABLE_SUSPEND_THREAD) && !defined(GC_DARWIN_THREADS) \
           && !defined(GC_OPENBSD_UTHREADS) && !defined(NACL) \
           && !defined(PLATFORM_STOP_WORLD) && !defined(SN_TARGET_PSP2)
          if ((me -> stop_info.ext_suspend_cnt & 1) != 0) {
            GC_with_callee_saves_pushed(GC_suspend_self_blocked, (ptr_t)me);
          }
#       endif
        UNLOCK();
        return GC_SUCCESS;
    } else {
        UNLOCK();
        return GC_DUPLICATE;
    }
}
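
/*
 * Illustrative client sketch (not part of this file): explicit
 * registration of a thread the collector did not create, e.g. one started
 * by a foreign library.  GC_allow_register_threads() must have been called
 * earlier from an already-registered thread (see the check above);
 * foreign_thread_body() is a hypothetical name.
 *
 *   static void foreign_thread_body(void)
 *   {
 *     struct GC_stack_base sb;
 *
 *     if (GC_get_stack_base(&sb) != GC_SUCCESS)
 *       abort();                      // could not determine the stack base
 *     if (GC_register_my_thread(&sb) == GC_SUCCESS) {
 *       // GC_MALLOC() and friends may be used from here on
 *       GC_unregister_my_thread();    // required before the thread exits
 *     }
 *   }
 */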

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but       */
                                /* parent hasn't yet noticed.           */
};

/* Called from GC_inner_start_routine().  Defined in this file to       */
/* minimize the number of include files in pthread_start.c (because     */
/* sem_t and sem_post() are not used in that file directly).            */
GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread(
                                        void *(**pstart)(void *),
                                        void **pstart_arg,
                                        struct GC_stack_base *sb, void *arg)
{
    struct start_info * si = (struct start_info *)arg;
    pthread_t self = pthread_self();
    GC_thread me;
    DCL_LOCK_STATE;

#   ifdef DEBUG_THREADS
      GC_log_printf("Starting thread %p, pid= %ld, sp= %p\n",
                    (void *)self, (long)getpid(), (void *)&arg);
#   endif
    LOCK();
    me = GC_register_my_thread_inner(sb, self);
    me -> flags = si -> flags;
#   if defined(THREAD_LOCAL_ALLOC)
      GC_init_thread_local(&(me->tlfs));
#   endif
    UNLOCK();
    *pstart = si -> start_routine;
#   ifdef DEBUG_THREADS
      GC_log_printf("start_routine= %p\n", (void *)(signed_word)(*pstart));
#   endif
    *pstart_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.   */
                                        /* OK to deallocate.    */
    return me;
}

#if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
  STATIC void * GC_start_routine(void * arg)
  {
#   ifdef INCLUDE_LINUX_THREAD_DESCR
      struct GC_stack_base sb;

#     ifdef REDIRECT_MALLOC
        /* GC_get_stack_base may call pthread_getattr_np, which can     */
        /* unfortunately call realloc, which may allocate from an       */
        /* unregistered thread.  This is unpleasant, since it might     */
        /* force heap growth (or, even, heap overflow).                 */
        GC_disable();
#     endif
      if (GC_get_stack_base(&sb) != GC_SUCCESS)
        ABORT("Failed to get thread stack base");
#     ifdef REDIRECT_MALLOC
        GC_enable();
#     endif
      return GC_inner_start_routine(&sb, arg);
#   else
      return GC_call_with_stack_base(GC_inner_start_routine, arg);
#   endif
  }

  GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                       GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
                       void *(*start_routine)(void *), void *arg)
  {
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info si;
    DCL_LOCK_STATE;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    /* We resist the temptation to muck with the stack size here,       */
    /* even if the default is unreasonably small.  That's the client's  */
    /* responsibility.                                                  */

    INIT_REAL_SYMS();
    if (!EXPECT(parallel_initialized, TRUE))
      GC_init_parallel();
    if (sem_init(&si.registered, GC_SEM_INIT_PSHARED, 0) != 0)
      ABORT("sem_init failed");

    si.start_routine = start_routine;
    si.arg = arg;
    LOCK();
    if (!EXPECT(GC_thr_initialized, TRUE))
      GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size = 0;
        if (NULL != attr) {
          if (pthread_attr_getstacksize(attr, &stack_size) != 0)
            ABORT("pthread_attr_getstacksize failed");
        }
        if (0 == stack_size) {
           pthread_attr_t my_attr;

           if (pthread_attr_init(&my_attr) != 0)
             ABORT("pthread_attr_init failed");
           if (pthread_attr_getstacksize(&my_attr, &stack_size) != 0)
             ABORT("pthread_attr_getstacksize failed");
           (void)pthread_attr_destroy(&my_attr);
        }
        /* On Solaris 10, with default attr initialization,     */
        /* stack_size remains 0.  Fudge it.                     */
        if (0 == stack_size) {
#           ifndef SOLARIS
              WARN("Failed to get stack size for assertion checking\n", 0);
#           endif
            stack_size = 1000000;
        }
        GC_ASSERT(stack_size >= 65536);
        /* Our threads may need to do some work for the GC.     */
        /* Ridiculously small stacks won't work for us, and     */
        /* they probably wouldn't work for the client anyway.   */
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        if (pthread_attr_getdetachstate(attr, &detachstate) != 0)
            ABORT("pthread_attr_getdetachstate failed");
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si.flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_log_printf("About to start new thread from thread %p\n",
                    (void *)pthread_self());
#   endif
#   ifdef PARALLEL_MARK
      if (EXPECT(!GC_parallel && available_markers_m1 > 0, FALSE))
        GC_start_mark_threads();
#   endif
    set_need_to_lock();
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine,
                                       &si);

    /* Wait until child has been added to the thread table.             */
    /* This also ensures that we hold onto the stack-allocated si until */
    /* the child is done with it.                                       */
    if (0 == result) {
        IF_CANCEL(int cancel_state;)

#       ifdef DEBUG_THREADS
            /* new_thread is non-NULL because pthread_create requires it. */
            GC_log_printf("Started thread %p\n", (void *)(*new_thread));
#       endif
        DISABLE_CANCEL(cancel_state);
                /* pthread_create is not a cancellation point. */
        while (0 != sem_wait(&si.registered)) {
#           if defined(GC_HAIKU_THREADS)
              /* To work around a bug in Haiku semaphores. */
              if (EACCES == errno) continue;
#           endif
            if (EINTR != errno) ABORT("sem_wait failed");
        }
        RESTORE_CANCEL(cancel_state);
    }
    sem_destroy(&si.registered);
    return(result);
  }
#endif /* !SN_TARGET_ORBIS && !SN_TARGET_PSP2 */
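
/*
 * Illustrative client sketch (not part of this file): with GC_THREADS
 * defined before including gc.h (and GC_NO_THREAD_REDIRECTS not defined),
 * the header redirects pthread_create() to the wrapper above, so ordinary
 * pthread code gets its threads registered automatically:
 *
 *   #define GC_THREADS
 *   #include "gc.h"
 *   #include <pthread.h>
 *
 *   static void *worker(void *arg)
 *   {
 *     return GC_MALLOC(128);       // this thread is already registered
 *   }
 *
 *   int main(void)
 *   {
 *     pthread_t t;
 *
 *     GC_INIT();
 *     pthread_create(&t, NULL, worker, NULL);  // intercepted by the GC
 *     pthread_join(t, NULL);                   // also intercepted
 *     return 0;
 *   }
 */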

#if defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK)
/* Spend a few cycles in a way that can't introduce contention with     */
/* other threads.                                                       */
#define GC_PAUSE_SPIN_CYCLES 10
STATIC void GC_pause(void)
{
    int i;

    for (i = 0; i < GC_PAUSE_SPIN_CYCLES; ++i) {
        /* Something that's unlikely to be optimized away. */
#     if defined(AO_HAVE_compiler_barrier) \
         && !defined(BASE_ATOMIC_OPS_EMULATED)
        AO_compiler_barrier();
#     else
        GC_noop1(i);
#     endif
    }
}
#endif

#ifndef SPIN_MAX
# define SPIN_MAX 128   /* Maximum number of calls to GC_pause before   */
                        /* giving up.                                   */
#endif

GC_INNER volatile GC_bool GC_collecting = FALSE;
                        /* A hint that we're in the collector and       */
                        /* holding the allocation lock for an           */
                        /* extended period.                             */

#if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK)) \
        || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either    */
/* because we don't have a GC_test_and_set implementation, or because   */
/* we don't want to risk sleeping, we can still try spinning on         */
/* pthread_mutex_trylock for a while.  This appears to be very          */
/* beneficial in many cases.                                            */
/* I suspect that under high contention this is nearly always better    */
/* than the spin lock.  But it's a bit slower on a uniprocessor.        */
/* Hence we still default to the spin lock.                             */
/* This is also used to acquire the mark lock for the parallel          */
/* marker.                                                              */

/* Here we use a strict exponential backoff scheme.  I don't know       */
/* whether that's better or worse than the above.  We eventually        */
/* yield by calling pthread_mutex_lock(); it never makes sense to       */
/* explicitly sleep.                                                    */

/* #define LOCK_STATS */
/* Note that LOCK_STATS requires AO_HAVE_test_and_set.  */
#ifdef LOCK_STATS
  volatile AO_t GC_spin_count = 0;
  volatile AO_t GC_block_count = 0;
  volatile AO_t GC_unlocked_count = 0;
#endif

STATIC void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
#       ifdef LOCK_STATS
            (void)AO_fetch_and_add1(&GC_unlocked_count);
#       endif
        return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    (void)AO_fetch_and_add1(&GC_spin_count);
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        (void)AO_fetch_and_add1(&GC_block_count);
#   endif
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || ... */

#if defined(AO_HAVE_char_load) && !defined(BASE_ATOMIC_OPS_EMULATED)
# define is_collecting() \
                ((GC_bool)AO_char_load((unsigned char *)&GC_collecting))
#else
  /* GC_collecting is a hint, a potential data race between     */
  /* GC_lock() and ENTER/EXIT_GC() is OK to ignore.             */
# define is_collecting() GC_collecting
#endif

#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;

# define low_spin_max 30 /* spin cycles if we suspect uniprocessor  */
# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor   */

  static volatile AO_t spin_max = low_spin_max;
  static volatile AO_t last_spins = 0;
                                /* A potential data race between        */
                                /* threads invoking GC_lock which reads */
                                /* and updates spin_max and last_spins  */
                                /* can be ignored because these         */
                                /* variables are hints only.            */

GC_INNER void GC_lock(void)
{
    unsigned my_spin_max;
    unsigned my_last_spins;
    unsigned i;

    if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
        return;
    }
    my_spin_max = (unsigned)AO_load(&spin_max);
    my_last_spins = (unsigned)AO_load(&last_spins);
    for (i = 0; i < my_spin_max; i++) {
        if (is_collecting() || GC_nprocs == 1)
          goto yield;
        if (i < my_last_spins/2) {
            GC_pause();
            continue;
        }
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            /*
             * Got it!  Spinning worked, so we're probably not being
             * scheduled against the other process with which we were
             * contending.  Thus it makes sense to spin longer the next
             * time.
             */
            AO_store(&last_spins, (AO_t)i);
            AO_store(&spin_max, (AO_t)high_spin_max);
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    AO_store(&spin_max, (AO_t)low_spin_max);
yield:
    for (i = 0;; ++i) {
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* Under Linux very short sleeps tend to wait until     */
                /* the current time quantum expires.  On old Linux      */
                /* kernels nanosleep (<= 2 ms) just spins.              */
                /* (Under 2.4, this happens only for real-time          */
                /* processes.)  We want to minimize both behaviors      */
                /* here.                                                */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                        /* Don't wait for more than about 15 ms,        */
                        /* even under extreme contention.               */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}

#elif defined(USE_PTHREAD_LOCKS)

# ifndef NO_PTHREAD_TRYLOCK
    GC_INNER void GC_lock(void)
    {
      if (1 == GC_nprocs || is_collecting()) {
        pthread_mutex_lock(&GC_allocate_ml);
      } else {
        GC_generic_lock(&GC_allocate_ml);
      }
    }
# elif defined(GC_ASSERTIONS)
    GC_INNER void GC_lock(void)
    {
      pthread_mutex_lock(&GC_allocate_ml);
    }
# endif

#endif /* !USE_SPIN_LOCK && USE_PTHREAD_LOCKS */

#ifdef PARALLEL_MARK

# ifdef GC_ASSERTIONS
    STATIC unsigned long GC_mark_lock_holder = NO_THREAD;
#   define SET_MARK_LOCK_HOLDER \
                (void)(GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self()))
#   define UNSET_MARK_LOCK_HOLDER \
                do { \
                  GC_ASSERT(GC_mark_lock_holder \
                                == NUMERIC_THREAD_ID(pthread_self())); \
                  GC_mark_lock_holder = NO_THREAD; \
                } while (0)
# else
#   define SET_MARK_LOCK_HOLDER (void)0
#   define UNSET_MARK_LOCK_HOLDER (void)0
# endif /* !GC_ASSERTIONS */

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

static void setup_mark_lock(void)
{
# ifdef GLIBC_2_19_TSX_BUG
    pthread_mutexattr_t mattr;
    int glibc_minor = -1;
    int glibc_major = GC_parse_version(&glibc_minor, gnu_get_libc_version());

    if (glibc_major > 2 || (glibc_major == 2 && glibc_minor >= 19)) {
      /* TODO: disable this workaround for glibc with fixed TSX */
      /* This disables lock elision to work around a bug in glibc 2.19+ */
      if (0 != pthread_mutexattr_init(&mattr)) {
        ABORT("pthread_mutexattr_init failed");
      }
      if (0 != pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_NORMAL)) {
        ABORT("pthread_mutexattr_settype failed");
      }
      if (0 != pthread_mutex_init(&mark_mutex, &mattr)) {
        ABORT("pthread_mutex_init failed");
      }
      (void)pthread_mutexattr_destroy(&mattr);
    }
# endif
}

GC_INNER void GC_acquire_mark_lock(void)
{
#   if defined(NUMERIC_THREAD_ID_UNIQUE) && !defined(THREAD_SANITIZER)
      GC_ASSERT(GC_mark_lock_holder != NUMERIC_THREAD_ID(pthread_self()));
#   endif
    GC_generic_lock(&mark_mutex);
    SET_MARK_LOCK_HOLDER;
}

GC_INNER void GC_release_mark_lock(void)
{
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}

/* The collector must wait for free-list builders for two reasons:      */
/* 1) Mark bits may still be getting examined without lock.             */
/* 2) Partial free lists referenced only by locals may not be scanned   */
/*    correctly, e.g. if they contain "pointer-free" objects, since the */
/*    free-list link may be ignored.                                    */
STATIC void GC_wait_builder(void)
{
    ASSERT_CANCEL_DISABLED();
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    SET_MARK_LOCK_HOLDER;
}

GC_INNER void GC_wait_for_reclaim(void)
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

# if defined(CAN_HANDLE_FORK) && defined(THREAD_SANITIZER)
    /* Identical to GC_wait_for_reclaim() but with the no_sanitize      */
    /* attribute as a workaround for TSan which does not notice that    */
    /* the GC lock is acquired in fork_prepare_proc().                  */
    GC_ATTR_NO_SANITIZE_THREAD
    static void wait_for_reclaim_atfork(void)
    {
      GC_acquire_mark_lock();
      while (GC_fl_builder_count > 0)
        GC_wait_builder();
      GC_release_mark_lock();
    }
# endif /* CAN_HANDLE_FORK && THREAD_SANITIZER */

GC_INNER void GC_notify_all_builder(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

GC_INNER void GC_wait_marker(void)
{
    ASSERT_CANCEL_DISABLED();
    GC_ASSERT(GC_parallel);
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    SET_MARK_LOCK_HOLDER;
}

GC_INNER void GC_notify_all_marker(void)
{
    GC_ASSERT(GC_parallel);
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

#ifdef PTHREAD_REGISTER_CANCEL_WEAK_STUBS
  /* Work around "undefined reference" linkage errors on some targets. */
  EXTERN_C_BEGIN
  extern void __pthread_register_cancel(void) __attribute__((__weak__));
  extern void __pthread_unregister_cancel(void) __attribute__((__weak__));
  EXTERN_C_END

  void __pthread_register_cancel(void) {}
  void __pthread_unregister_cancel(void) {}
#endif

#endif /* GC_PTHREADS */