ivmai / bdwgc, build 1451 (push, via travis-ci-com)

10 Mar 2023 05:47PM UTC. Coverage: 76.182% (-0.5%) from 76.638%.

Commit by ivmai:
Change enable_threads default to off if threads are unsupported (CMake)

PR #535 (bdwgc).

If multi-threading support is absent on the target platform, as on
Emscripten or WASI, the CMake build should not fail by default (i.e.
threads support should not be on by default).  This matches the
corresponding behavior of the configure-based build.

* CMakeLists.txt (default_enable_threads): New variable (ON by
default).
* CMakeLists.txt: Call find_package(Threads) quietly regardless of
enable_threads.
* CMakeLists.txt [EMSCRIPTEN || WASI || !(CMAKE_USE_PTHREADS_INIT ||
CMAKE_USE_WIN32_THREADS_INIT)] (default_enable_threads): Set to OFF.
* CMakeLists.txt (enable_threads): Specify the default value to
default_enable_threads.
* CMakeLists.txt: Check CMAKE_USE_PTHREADS_INIT and
CMAKE_USE_WIN32_THREADS_INIT only if enable_threads.

Co-authored-by: Ivan Maidanski <ivmai@mail.ru>
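
A minimal sketch of the CMakeLists.txt logic described above, reconstructed
from the ChangeLog entries rather than taken from the actual diff (the
variable and option names are the ones the entries mention):

  # Sketch only: compute the default for the enable_threads option from the
  # detected platform and thread support, instead of hard-coding ON.
  set(default_enable_threads ON)
  # Probe for threads unconditionally, but quietly, so a missing threads
  # package does not by itself fail the configuration step.
  find_package(Threads QUIET)
  if (EMSCRIPTEN OR WASI
      OR NOT (CMAKE_USE_PTHREADS_INIT OR CMAKE_USE_WIN32_THREADS_INIT))
    set(default_enable_threads OFF)
  endif()
  option(enable_threads "Support threads" ${default_enable_threads})
  if (enable_threads)
    # Check CMAKE_USE_PTHREADS_INIT/CMAKE_USE_WIN32_THREADS_INIT (and fail
    # the configuration if neither is set) only when threads are requested.
  endif()

With this shape, configuring for a platform without thread support silently
defaults to enable_threads=OFF, while passing -Denable_threads=ON explicitly
still produces a hard configure-time error.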

7734 of 10152 relevant lines covered (76.18%)

8264643.27 hits per line

Source File

/pthread_support.c: 82.46% of lines covered
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2008 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/pthread_support.h"

/*
 * Support code originally for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code no doubt makes some assumptions beyond what is
 * guaranteed by the pthread standard, though it now does
 * very little of that.  It now also supports NPTL, and many
 * other Posix thread implementations.  We are trying to merge
 * all flavors of pthread support code into this file.
 */

#ifdef THREADS

#ifdef GC_PTHREADS
# include <errno.h>
# ifndef GC_WIN32_PTHREADS
#   include <unistd.h>
# endif
# if defined(GC_DARWIN_THREADS) \
     || (defined(GC_WIN32_THREADS) && defined(EMULATE_PTHREAD_SEMAPHORE))
#   include "private/darwin_semaphore.h"
# elif !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
#   include <semaphore.h>
# endif
#endif /* GC_PTHREADS */

#ifndef GC_WIN32_THREADS
# include <sched.h>
# include <time.h>
# if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
#   if !defined(GC_RTEMS_PTHREADS)
#     include <sys/mman.h>
#   endif
#   include <sys/time.h>
#   include <sys/types.h>
#   include <sys/stat.h>
#   include <fcntl.h>
# endif
# include <signal.h>
#endif /* !GC_WIN32_THREADS */

#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif

#if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
# include <sys/param.h>
# include <sys/sysctl.h>
#endif

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an unsigned int in DG/UX. */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */

#if defined(GC_PTHREADS) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
  /* Undefine macros used to redirect pthread primitives.       */
# undef pthread_create
# ifndef GC_NO_PTHREAD_SIGMASK
#   undef pthread_sigmask
# endif
# ifndef GC_NO_PTHREAD_CANCEL
#   undef pthread_cancel
# endif
# ifdef GC_HAVE_PTHREAD_EXIT
#   undef pthread_exit
# endif
# undef pthread_join
# undef pthread_detach
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
     && !defined(_PTHREAD_USE_PTDNAM_)
    /* Restore the original mangled names on Tru64 UNIX.        */
#   define pthread_create __pthread_create
#   define pthread_join   __pthread_join
#   define pthread_detach __pthread_detach
#   ifndef GC_NO_PTHREAD_CANCEL
#     define pthread_cancel __pthread_cancel
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
#     define pthread_exit __pthread_exit
#   endif
# endif /* GC_OSF1_THREADS */
#endif /* GC_PTHREADS */

#if !defined(GC_WIN32_THREADS) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
  /* TODO: Enable GC_USE_DLOPEN_WRAP for Cygwin? */

# ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
    int REAL_FUNC(pthread_create)(pthread_t *,
                                  GC_PTHREAD_CREATE_CONST pthread_attr_t *,
                                  void *(*start_routine)(void *), void *);
    int REAL_FUNC(pthread_join)(pthread_t, void **);
    int REAL_FUNC(pthread_detach)(pthread_t);
#   ifndef GC_NO_PTHREAD_SIGMASK
      int REAL_FUNC(pthread_sigmask)(int, const sigset_t *, sigset_t *);
#   endif
#   ifndef GC_NO_PTHREAD_CANCEL
      int REAL_FUNC(pthread_cancel)(pthread_t);
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
      void REAL_FUNC(pthread_exit)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
#   endif
# elif defined(GC_USE_DLOPEN_WRAP)
#   include <dlfcn.h>
#   define WRAP_FUNC(f) f
#   define REAL_FUNC(f) GC_real_##f
    /* We define both GC_f and plain f to be the wrapped function.  */
    /* In that way plain calls work, as do calls from files that    */
    /* included gc.h, which redefined f to GC_f.                    */
    /* FIXME: Needs work for Darwin and Tru64 (OSF1). */
    typedef int (* GC_pthread_create_t)(pthread_t *,
                                GC_PTHREAD_CREATE_CONST pthread_attr_t *,
                                void *(*)(void *), void *);
    static GC_pthread_create_t REAL_FUNC(pthread_create);
#   ifndef GC_NO_PTHREAD_SIGMASK
      typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *, sigset_t *);
      static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
#   endif
    typedef int (* GC_pthread_join_t)(pthread_t, void **);
    static GC_pthread_join_t REAL_FUNC(pthread_join);
    typedef int (* GC_pthread_detach_t)(pthread_t);
    static GC_pthread_detach_t REAL_FUNC(pthread_detach);
#   ifndef GC_NO_PTHREAD_CANCEL
      typedef int (* GC_pthread_cancel_t)(pthread_t);
      static GC_pthread_cancel_t REAL_FUNC(pthread_cancel);
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
      typedef void (* GC_pthread_exit_t)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
      static GC_pthread_exit_t REAL_FUNC(pthread_exit);
#   endif
# else
#   define WRAP_FUNC(f) GC_##f
#   ifdef GC_DGUX386_THREADS
#     define REAL_FUNC(f) __d10_##f
#   else
#     define REAL_FUNC(f) f
#   endif
# endif /* !GC_USE_LD_WRAP && !GC_USE_DLOPEN_WRAP */

# if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
    /* Define GC_ functions as aliases for the plain ones, which will   */
    /* be intercepted.  This allows files which include gc.h, and hence */
    /* generate references to the GC_ symbols, to see the right ones.   */
    GC_API int GC_pthread_create(pthread_t *t,
                                 GC_PTHREAD_CREATE_CONST pthread_attr_t *a,
                                 void * (*fn)(void *), void *arg)
    {
      return pthread_create(t, a, fn, arg);
    }

#   ifndef GC_NO_PTHREAD_SIGMASK
      GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
                                    sigset_t *old)
      {
        return pthread_sigmask(how, mask, old);
      }
#   endif /* !GC_NO_PTHREAD_SIGMASK */

    GC_API int GC_pthread_join(pthread_t t, void **res)
    {
      return pthread_join(t, res);
    }

    GC_API int GC_pthread_detach(pthread_t t)
    {
      return pthread_detach(t);
    }

#   ifndef GC_NO_PTHREAD_CANCEL
      GC_API int GC_pthread_cancel(pthread_t t)
      {
        return pthread_cancel(t);
      }
#   endif /* !GC_NO_PTHREAD_CANCEL */

#   ifdef GC_HAVE_PTHREAD_EXIT
      GC_API GC_PTHREAD_EXIT_ATTRIBUTE void GC_pthread_exit(void *retval)
      {
        pthread_exit(retval);
      }
#   endif
# endif /* GC_USE_LD_WRAP || GC_USE_DLOPEN_WRAP */

# ifdef GC_USE_DLOPEN_WRAP
    STATIC GC_bool GC_syms_initialized = FALSE;

    STATIC void GC_init_real_syms(void)
    {
      void *dl_handle;

      GC_ASSERT(!GC_syms_initialized);
#     ifdef RTLD_NEXT
        dl_handle = RTLD_NEXT;
#     else
        dl_handle = dlopen("libpthread.so.0", RTLD_LAZY);
        if (NULL == dl_handle) {
          dl_handle = dlopen("libpthread.so", RTLD_LAZY); /* without ".0" */
          if (NULL == dl_handle) ABORT("Couldn't open libpthread");
        }
#     endif
      REAL_FUNC(pthread_create) = (GC_pthread_create_t)(word)
                                dlsym(dl_handle, "pthread_create");
#     ifdef RTLD_NEXT
        if (REAL_FUNC(pthread_create) == 0)
          ABORT("pthread_create not found"
                " (probably -lgc is specified after -lpthread)");
#     endif
#     ifndef GC_NO_PTHREAD_SIGMASK
        REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)(word)
                                dlsym(dl_handle, "pthread_sigmask");
#     endif
      REAL_FUNC(pthread_join) = (GC_pthread_join_t)(word)
                                dlsym(dl_handle, "pthread_join");
      REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)(word)
                                dlsym(dl_handle, "pthread_detach");
#     ifndef GC_NO_PTHREAD_CANCEL
        REAL_FUNC(pthread_cancel) = (GC_pthread_cancel_t)(word)
                                dlsym(dl_handle, "pthread_cancel");
#     endif
#     ifdef GC_HAVE_PTHREAD_EXIT
        REAL_FUNC(pthread_exit) = (GC_pthread_exit_t)(word)
                                dlsym(dl_handle, "pthread_exit");
#     endif
      GC_syms_initialized = TRUE;
    }

#   define INIT_REAL_SYMS() if (EXPECT(GC_syms_initialized, TRUE)) {} \
                            else GC_init_real_syms()
# else
#   define INIT_REAL_SYMS() (void)0
# endif /* !GC_USE_DLOPEN_WRAP */

#else
# define WRAP_FUNC(f) GC_##f
# define REAL_FUNC(f) f
# define INIT_REAL_SYMS() (void)0
#endif /* GC_WIN32_THREADS */

#ifndef GC_ALWAYS_MULTITHREADED
  GC_INNER GC_bool GC_need_to_lock = FALSE;
#endif

#ifdef THREAD_LOCAL_ALLOC
  /* We must explicitly mark ptrfree and gcj free lists, since the free */
  /* list links wouldn't otherwise be found.  We also set them in the   */
  /* normal free lists, since that involves touching less memory than   */
  /* if we scanned them normally.                                       */
  GC_INNER void GC_mark_thread_local_free_lists(void)
  {
    int i;
    GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
        if (!KNOWN_FINISHED(p))
          GC_mark_thread_local_fls_for(&p->tlfs);
      }
    }
  }

# if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists are completely marked.    */
    /* Also check that thread-specific-data structures are marked.      */
    void GC_check_tls(void)
    {
        int i;
        GC_thread p;

        for (i = 0; i < THREAD_TABLE_SZ; ++i) {
          for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
            if (!KNOWN_FINISHED(p))
              GC_check_tls_for(&p->tlfs);
          }
        }
#       if defined(USE_CUSTOM_SPECIFIC)
          if (GC_thread_key != 0)
            GC_check_tsd_marks(GC_thread_key);
#       endif
    }
# endif /* GC_ASSERTIONS */
#endif /* THREAD_LOCAL_ALLOC */

#ifdef GC_WIN32_THREADS
  /* A macro for functions and variables which should be accessible     */
  /* from win32_threads.c but otherwise could be static.                */
# define GC_INNER_WIN32THREAD GC_INNER
#else
# define GC_INNER_WIN32THREAD STATIC
#endif

#ifdef PARALLEL_MARK

# if defined(GC_WIN32_THREADS) || defined(USE_PROC_FOR_LIBRARIES) \
     || (defined(IA64) && (defined(HAVE_PTHREAD_ATTR_GET_NP) \
                           || defined(HAVE_PTHREAD_GETATTR_NP)))
    GC_INNER_WIN32THREAD ptr_t GC_marker_sp[MAX_MARKERS - 1] = {0};
                                        /* The cold end of the stack    */
                                        /* for markers.                 */
# endif /* GC_WIN32_THREADS || USE_PROC_FOR_LIBRARIES */

# if defined(IA64) && defined(USE_PROC_FOR_LIBRARIES)
    static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
# endif

# if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
    static mach_port_t marker_mach_threads[MAX_MARKERS - 1] = {0};

    /* Used only by GC_suspend_thread_list().   */
    GC_INNER GC_bool GC_is_mach_marker(thread_act_t thread)
    {
      int i;
      for (i = 0; i < GC_markers_m1; i++) {
        if (marker_mach_threads[i] == thread)
          return TRUE;
      }
      return FALSE;
    }
# endif /* GC_DARWIN_THREADS */

# ifdef HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG /* NetBSD */
    static void set_marker_thread_name(unsigned id)
    {
      int err = pthread_setname_np(pthread_self(), "GC-marker-%zu",
                                   (void*)(size_t)id);
      if (EXPECT(err != 0, FALSE))
        WARN("pthread_setname_np failed, errno= %" WARN_PRIdPTR "\n",
             (signed_word)err);
    }
# elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) \
       || defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
    static void set_marker_thread_name(unsigned id)
    {
      char name_buf[16]; /* pthread_setname_np may fail for longer names */
      int len = sizeof("GC-marker-") - 1;

      /* Compose the name manually as snprintf may be unavailable or    */
      /* "%u directive output may be truncated" warning may occur.      */
      BCOPY("GC-marker-", name_buf, len);
      if (id >= 10)
        name_buf[len++] = (char)('0' + (id / 10) % 10);
      name_buf[len] = (char)('0' + id % 10);
      name_buf[len + 1] = '\0';

#     ifdef HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID /* iOS, OS X */
        (void)pthread_setname_np(name_buf);
#     else /* Linux, Solaris, etc. */
        if (EXPECT(pthread_setname_np(pthread_self(), name_buf) != 0, FALSE))
          WARN("pthread_setname_np failed\n", 0);
#     endif
    }
# elif defined(GC_WIN32_THREADS) && !defined(MSWINCE)
    /* A pointer to SetThreadDescription() which is available since     */
    /* Windows 10.  The function prototype is in processthreadsapi.h.   */
    static FARPROC setThreadDescription_fn;

    GC_INNER void GC_init_win32_thread_naming(HMODULE hK32)
    {
      if (hK32)
        setThreadDescription_fn = GetProcAddress(hK32, "SetThreadDescription");
    }

    static void set_marker_thread_name(unsigned id)
    {
      WCHAR name_buf[16];
      int len = sizeof(L"GC-marker-") / sizeof(WCHAR) - 1;
      HRESULT hr;

      if (!setThreadDescription_fn) return; /* missing SetThreadDescription */

      /* Compose the name manually as swprintf may be unavailable.      */
      BCOPY(L"GC-marker-", name_buf, len * sizeof(WCHAR));
      if (id >= 10)
        name_buf[len++] = (WCHAR)('0' + (id / 10) % 10);
      name_buf[len] = (WCHAR)('0' + id % 10);
      name_buf[len + 1] = 0;

      /* Invoke SetThreadDescription().  Cast the function pointer to word  */
      /* first to avoid "incompatible function types" compiler warning.     */
      hr = (*(HRESULT (WINAPI *)(HANDLE, const WCHAR *))
            (word)setThreadDescription_fn)(GetCurrentThread(), name_buf);
      if (hr < 0)
        WARN("SetThreadDescription failed\n", 0);
    }
# else
#   define set_marker_thread_name(id) (void)(id)
# endif

  GC_INNER_WIN32THREAD
# ifdef GC_PTHREADS_PARAMARK
    void *GC_mark_thread(void *id)
# elif defined(MSWINCE)
    DWORD WINAPI GC_mark_thread(LPVOID id)
# else
    unsigned __stdcall GC_mark_thread(void *id)
# endif
  {
    word my_mark_no = 0;
    IF_CANCEL(int cancel_state;)

    if ((word)id == GC_WORD_MAX) return 0; /* to prevent a compiler warning */
    DISABLE_CANCEL(cancel_state);
                         /* Mark threads are not cancellable; they      */
                         /* should be invisible to client.              */
    set_marker_thread_name((unsigned)(word)id);
#   if defined(GC_WIN32_THREADS) || defined(USE_PROC_FOR_LIBRARIES) \
       || (defined(IA64) && (defined(HAVE_PTHREAD_ATTR_GET_NP) \
                             || defined(HAVE_PTHREAD_GETATTR_NP)))
      GC_marker_sp[(word)id] = GC_approx_sp();
#   endif
#   if defined(IA64) && defined(USE_PROC_FOR_LIBRARIES)
      marker_bsp[(word)id] = GC_save_regs_in_stack();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
      marker_mach_threads[(word)id] = mach_thread_self();
#   endif
#   if !defined(GC_PTHREADS_PARAMARK)
      GC_marker_Id[(word)id] = thread_id_self();
#   endif

    /* Inform GC_start_mark_threads about completion of marker data init. */
    GC_acquire_mark_lock();
    if (0 == --GC_fl_builder_count) /* count may have a negative value */
      GC_notify_all_builder();

    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This is important if it were called from the signal   */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the      */
    /* argument is unnecessary.                                         */
    for (;; ++my_mark_no) {
      if (my_mark_no - GC_mark_no > (word)2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no     */
        /* wrapped.                                                     */
        my_mark_no = GC_mark_no;
      }
#     ifdef DEBUG_THREADS
        GC_log_printf("Starting helper for mark number %lu (thread %u)\n",
                      (unsigned long)my_mark_no, (unsigned)(word)id);
#     endif
      GC_help_marker(my_mark_no);
    }
  }

  GC_INNER_WIN32THREAD int GC_available_markers_m1 = 0;

#endif /* PARALLEL_MARK */

#ifdef GC_PTHREADS_PARAMARK

# ifdef GLIBC_2_1_MUTEX_HACK
    /* Ugly workaround for a linux threads bug in the final versions    */
    /* of glibc 2.1.  Pthread_mutex_trylock sets the mutex owner        */
    /* field even when it fails to acquire the mutex.  This causes      */
    /* pthread_cond_wait to die.  Should not be needed for glibc 2.2.   */
    /* According to the man page, we should use                         */
    /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
    /* defined.                                                         */
    static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
# else
    static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
# endif

# ifdef CAN_HANDLE_FORK
    static pthread_cond_t mark_cv;
                        /* initialized by GC_start_mark_threads_inner   */
# else
    static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
# endif

  GC_INNER void GC_start_mark_threads_inner(void)
  {
    int i;
    pthread_attr_t attr;
#   ifndef NO_MARKER_SPECIAL_SIGMASK
      sigset_t set, oldset;
#   endif

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_available_markers_m1 <= 0 || GC_parallel) return;
                /* Skip if parallel markers disabled or already started. */
    GC_wait_for_gc_completion(TRUE);

#   ifdef CAN_HANDLE_FORK
      /* Initialize mark_cv (for the first time), or cleanup its value  */
      /* after forking in the child process.  All the marker threads in */
      /* the parent process were blocked on this variable at fork, so   */
      /* pthread_cond_wait() malfunction (hang) is possible in the      */
      /* child process without such a cleanup.                          */
      /* TODO: This is not portable, it is better to shortly unblock    */
      /* all marker threads in the parent process at fork.              */
      {
        pthread_cond_t mark_cv_local = PTHREAD_COND_INITIALIZER;
        BCOPY(&mark_cv_local, &mark_cv, sizeof(mark_cv));
      }
#   endif

    GC_ASSERT(GC_fl_builder_count == 0);
    INIT_REAL_SYMS(); /* for pthread_create */
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   ifdef DEFAULT_STACK_MAYBE_SMALL
      /* Default stack size is usually too small: increase it.  */
      /* Otherwise marker threads or GC may run out of space.   */
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed");
        if (old_size < MIN_STACK_SIZE
            && old_size != 0 /* stack size is known */) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed");
        }
      }
#   endif /* DEFAULT_STACK_MAYBE_SMALL */

#   ifndef NO_MARKER_SPECIAL_SIGMASK
      /* Apply special signal mask to GC marker threads, and don't drop */
      /* user defined signals by GC marker threads.                     */
      if (sigfillset(&set) != 0)
        ABORT("sigfillset failed");

#     ifdef SIGNAL_BASED_STOP_WORLD
        /* These are used by GC to stop and restart the world.  */
        if (sigdelset(&set, GC_get_suspend_signal()) != 0
            || sigdelset(&set, GC_get_thr_restart_signal()) != 0)
          ABORT("sigdelset failed");
#     endif

      if (EXPECT(REAL_FUNC(pthread_sigmask)(SIG_BLOCK,
                                            &set, &oldset) < 0, FALSE)) {
        WARN("pthread_sigmask set failed, no markers started\n", 0);
        GC_markers_m1 = 0;
        (void)pthread_attr_destroy(&attr);
        return;
      }
#   endif /* !NO_MARKER_SPECIAL_SIGMASK */

    /* To have proper GC_parallel value in GC_help_marker.      */
    GC_markers_m1 = GC_available_markers_m1;

    for (i = 0; i < GC_available_markers_m1; ++i) {
      pthread_t new_thread;

#     ifdef GC_WIN32_THREADS
        GC_marker_last_stack_min[i] = ADDR_LIMIT;
#     endif
      if (EXPECT(REAL_FUNC(pthread_create)(&new_thread, &attr, GC_mark_thread,
                                           (void *)(word)i) != 0, FALSE)) {
        WARN("Marker thread %" WARN_PRIdPTR " creation failed\n",
             (signed_word)i);
        /* Don't try to create other marker threads.    */
        GC_markers_m1 = i;
        break;
      }
    }

#   ifndef NO_MARKER_SPECIAL_SIGMASK
      /* Restore previous signal mask.  */
      if (EXPECT(REAL_FUNC(pthread_sigmask)(SIG_SETMASK,
                                            &oldset, NULL) < 0, FALSE)) {
        WARN("pthread_sigmask restore failed\n", 0);
      }
#   endif

    (void)pthread_attr_destroy(&attr);
    GC_wait_for_markers_init();
    GC_COND_LOG_PRINTF("Started %d mark helper threads\n", GC_markers_m1);
  }

#endif /* GC_PTHREADS_PARAMARK */

/* A hash table to keep information about the registered threads.       */
/* Not used if GC_win32_dll_threads is set.                             */
GC_INNER GC_thread GC_threads[THREAD_TABLE_SZ] = {0};

/* A place to retain a pointer to an allocated object while a thread    */
/* registration is ongoing.  Protected by the GC lock.                  */
static GC_stack_context_t saved_crtn = NULL;

#ifdef GC_ASSERTIONS
  GC_INNER GC_bool GC_thr_initialized = FALSE;
#endif

void GC_push_thread_structures(void)
{
  GC_ASSERT(I_HOLD_LOCK());
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads) {
      /* Unlike the other threads implementations, the thread table     */
      /* here contains no pointers to the collectible heap (note also   */
      /* that GC_PTHREADS is incompatible with DllMain-based thread     */
      /* registration).  Thus we have no private structures we need     */
      /* to preserve.                                                   */
    } else
# endif
  /* else */ {
    GC_PUSH_ALL_SYM(GC_threads);
    GC_PUSH_ALL_SYM(saved_crtn);
  }
# if defined(THREAD_LOCAL_ALLOC) && defined(USE_CUSTOM_SPECIFIC)
    GC_PUSH_ALL_SYM(GC_thread_key);
# endif
}

#if defined(MPROTECT_VDB) && defined(GC_WIN32_THREADS)
  GC_INNER void GC_win32_unprotect_thread(GC_thread t)
  {
    if (!GC_win32_dll_threads && GC_auto_incremental) {
      GC_stack_context_t crtn = t -> crtn;

      GC_ASSERT(SMALL_OBJ(GC_size(crtn)));
      GC_remove_protection(HBLKPTR(crtn), 1, FALSE);
      GC_ASSERT(SMALL_OBJ(GC_size(t)));
      GC_remove_protection(HBLKPTR(t), 1, FALSE);
    }
  }
#endif /* MPROTECT_VDB && GC_WIN32_THREADS */

#ifdef DEBUG_THREADS
  STATIC int GC_count_threads(void)
  {
    int i;
    int count = 0;

#   if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
      if (GC_win32_dll_threads) return -1; /* not implemented */
#   endif
    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
        GC_thread p;

        for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
            if (!KNOWN_FINISHED(p))
                ++count;
        }
    }
    return count;
  }
#endif /* DEBUG_THREADS */

/* Add a thread to GC_threads.  We assume it wasn't already there.      */
/* The id field is set by the caller.                                   */
GC_INNER_WIN32THREAD GC_thread GC_new_thread(thread_id_t self_id)
{
    int hv = THREAD_TABLE_INDEX(self_id);
    GC_thread result;
    GC_stack_context_t crtn;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef DEBUG_THREADS
        GC_log_printf("Creating thread %p\n", (void *)(signed_word)self_id);
        for (result = GC_threads[hv];
             result != NULL; result = result -> tm.next)
          if (!THREAD_ID_EQUAL(result -> id, self_id)) {
            GC_log_printf("Hash collision at GC_threads[%d]\n", hv);
            break;
          }
#   endif
    GC_ASSERT(!GC_win32_dll_threads);
    GC_ASSERT(!GC_in_thread_creation);
    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread */
    crtn = (GC_stack_context_t)GC_INTERNAL_MALLOC(
                        sizeof(struct GC_StackContext_Rep), NORMAL);

    /* The current stack is not scanned until the thread is     */
    /* registered, thus crtn pointer is to be retained in the   */
    /* global data roots for a while (and pushed explicitly if  */
    /* a collection occurs here).                               */
    GC_ASSERT(NULL == saved_crtn);
    saved_crtn = crtn;
    result = (GC_thread)GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep),
                                           NORMAL);
    saved_crtn = NULL; /* no more collections till thread is registered */
    GC_in_thread_creation = FALSE;
    if (NULL == crtn || NULL == result)
      ABORT("Failed to allocate memory for thread registering");
    result -> crtn = crtn;

    /* The id field is not set here. */
#   ifdef USE_TKILL_ON_ANDROID
      result -> kernel_id = gettid();
#   endif
    result -> tm.next = GC_threads[hv];
    GC_threads[hv] = result;
#   ifdef NACL
      GC_nacl_initialize_gc_thread(result);
#   endif
    GC_ASSERT(0 == result -> flags);
    GC_dirty(result);
    return result;
}

/* Delete a thread from GC_threads.  We assume it is there.  (The code  */
/* intentionally traps if it was not.)  It is also safe to delete the   */
/* main thread.  If GC_win32_dll_threads is set, it should be called    */
/* only from the thread being deleted.  If a thread has been joined,    */
/* but we have not yet been notified, then there may be more than one   */
/* thread in the table with the same thread id - this is OK because we  */
/* delete a specific one.                                               */
GC_INNER_WIN32THREAD void GC_delete_thread(GC_thread t)
{
# if defined(GC_WIN32_THREADS) && !defined(MSWINCE)
    CloseHandle(t -> handle);
# endif
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads) {
      /* This is intended to be lock-free.  It is either called         */
      /* synchronously from the thread being deleted, or by the joining */
      /* thread.  In this branch asynchronous changes to (*t) are       */
      /* possible.  Note that it is not allowed to call GC_printf (and  */
      /* the friends) here, see GC_stop_world() in win32_threads.c for  */
      /* the information.                                               */
      t -> crtn -> stack_end = NULL;
      t -> id = 0;
      t -> flags = 0; /* !IS_SUSPENDED */
#     ifdef RETRY_GET_THREAD_CONTEXT
        t -> context_sp = NULL;
#     endif
      AO_store_release(&(t -> tm.in_use), FALSE);
    } else
# endif
  /* else */ {
    thread_id_t id = t -> id;
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread p;
    GC_thread prev = NULL;

    GC_ASSERT(I_HOLD_LOCK());
#   if defined(DEBUG_THREADS) && !defined(MSWINCE) \
       && (!defined(MSWIN32) || defined(CONSOLE_LOG))
      GC_log_printf("Deleting thread %p, n_threads= %d\n",
                    (void *)(signed_word)id, GC_count_threads());
#   endif
    for (p = GC_threads[hv]; p != t; p = p -> tm.next) {
      prev = p;
    }
    if (NULL == prev) {
        GC_threads[hv] = p -> tm.next;
    } else {
        prev -> tm.next = p -> tm.next;
        GC_dirty(prev);
    }
#   ifdef GC_DARWIN_THREADS
      mach_port_deallocate(mach_task_self(), p -> mach_thread);
#   endif
    GC_INTERNAL_FREE(p -> crtn);
    GC_INTERNAL_FREE(p);
  }
}

/* Return a GC_thread corresponding to a given thread id, or    */
/* NULL if it is not there.                                     */
/* Caller holds allocation lock or otherwise inhibits updates.  */
/* If there is more than one thread with the given id we        */
/* return the most recent one.                                  */
GC_INNER GC_thread GC_lookup_thread(thread_id_t id)
{
  GC_thread p;

# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads)
      return GC_win32_dll_lookup_thread(id);
# endif
  for (p = GC_threads[THREAD_TABLE_INDEX(id)];
       p != NULL; p = p -> tm.next) {
    if (THREAD_ID_EQUAL(p -> id, id)) break;
  }
  return p;
}

/* Same as GC_self_thread_inner() but acquires the GC lock.     */
STATIC GC_thread GC_self_thread(void) {
  GC_thread p;

  LOCK();
  p = GC_self_thread_inner();
  UNLOCK();
  return p;
}

#ifndef GC_NO_FINALIZATION
  /* Called by GC_finalize() (in case of an allocation failure observed). */
  GC_INNER void GC_reset_finalizer_nested(void)
  {
    GC_ASSERT(I_HOLD_LOCK());
    GC_self_thread_inner() -> crtn -> finalizer_nested = 0;
  }

  /* Checks and updates the thread-local level of finalizers recursion. */
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
  /* collector (to minimize the risk of a deep finalizers recursion),   */
  /* otherwise returns a pointer to the thread-local finalizer_nested.  */
  /* Called by GC_notify_or_invoke_finalizers() only.                   */
  GC_INNER unsigned char *GC_check_finalizer_nested(void)
  {
    GC_stack_context_t crtn;
    unsigned nesting_level;

    GC_ASSERT(I_HOLD_LOCK());
    crtn = GC_self_thread_inner() -> crtn;
    nesting_level = crtn -> finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
      if (++(crtn -> finalizer_skipped) < (1U << nesting_level))
        return NULL;
      crtn -> finalizer_skipped = 0;
    }
    crtn -> finalizer_nested = (unsigned char)(nesting_level + 1);
    return &(crtn -> finalizer_nested);
  }
#endif /* !GC_NO_FINALIZATION */

#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  /* This is called from thread-local GC_malloc(). */
  GC_bool GC_is_thread_tsd_valid(void *tsd)
  {
    GC_thread me = GC_self_thread();

    return (word)tsd >= (word)(&me->tlfs)
            && (word)tsd < (word)(&me->tlfs) + sizeof(me->tlfs);
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */

GC_API int GC_CALL GC_thread_is_registered(void)
{
  /* TODO: Use GC_get_tlfs() instead. */
  GC_thread me = GC_self_thread();

  return me != NULL && !KNOWN_FINISHED(me);
}

#ifndef GC_WIN32_THREADS
  static void *main_normstack, *main_altstack;
  static word main_normstack_size, main_altstack_size;
#endif

GC_API void GC_CALL GC_register_altstack(void *normstack,
                GC_word normstack_size, void *altstack, GC_word altstack_size)
{
#ifdef GC_WIN32_THREADS
  /* TODO: Implement */
  UNUSED_ARG(normstack);
  UNUSED_ARG(normstack_size);
  UNUSED_ARG(altstack);
  UNUSED_ARG(altstack_size);
#else
  GC_thread me;

  LOCK();
  me = GC_self_thread_inner();
  if (EXPECT(me != NULL, TRUE)) {
    GC_stack_context_t crtn = me -> crtn;

    crtn -> normstack = (ptr_t)normstack;
    crtn -> normstack_size = normstack_size;
    crtn -> altstack = (ptr_t)altstack;
    crtn -> altstack_size = altstack_size;
  } else {
    /* We are called before GC_thr_init. */
    main_normstack = normstack;
    main_normstack_size = normstack_size;
    main_altstack = altstack;
    main_altstack_size = altstack_size;
  }
  UNLOCK();
#endif
}

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
  {
    int i;
    GC_thread p;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers_m1; ++i) {
        if ((word)GC_marker_sp[i] > (word)lo
            && (word)GC_marker_sp[i] < (word)hi)
          return TRUE;
#       ifdef IA64
          if ((word)marker_bsp[i] > (word)lo
              && (word)marker_bsp[i] < (word)hi)
            return TRUE;
#       endif
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
        GC_stack_context_t crtn = p -> crtn;

        if (crtn -> stack_end != NULL) {
#         ifdef STACK_GROWS_UP
            if ((word)crtn -> stack_end >= (word)lo
                && (word)crtn -> stack_end < (word)hi)
              return TRUE;
#         else /* STACK_GROWS_DOWN */
            if ((word)crtn -> stack_end > (word)lo
                && (word)crtn -> stack_end <= (word)hi)
              return TRUE;
#         endif
        }
      }
    }
    return FALSE;
  }
#endif /* USE_PROC_FOR_LIBRARIES */

#if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
    && defined(IA64)
  /* Find the largest stack base smaller than bound.  May be used       */
  /* to find the boundary between a register stack and adjacent         */
  /* immediately preceding memory stack.                                */
  GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
  {
    int i;
    GC_thread p;
    ptr_t result = 0;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers_m1; ++i) {
        if ((word)GC_marker_sp[i] > (word)result
            && (word)GC_marker_sp[i] < (word)bound)
          result = GC_marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
        GC_stack_context_t crtn = p -> crtn;

        if ((word)(crtn -> stack_end) > (word)result
            && (word)(crtn -> stack_end) < (word)bound) {
          result = crtn -> stack_end;
        }
      }
    }
    return result;
  }
#endif /* IA64 */

#ifndef STAT_READ
# define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call    */
        /* the real one.                                                */
#endif

#ifdef GC_HPUX_THREADS
# define GC_get_nprocs() pthread_num_processors_np()

#elif defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
      || defined(GC_HAIKU_THREADS) || defined(GC_SOLARIS_THREADS) \
      || defined(HURD) || defined(HOST_ANDROID) || defined(NACL)
  GC_INLINE int GC_get_nprocs(void)
  {
    int nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
    return nprocs > 0 ? nprocs : 1; /* ignore error silently */
  }

#elif defined(GC_IRIX_THREADS)
  GC_INLINE int GC_get_nprocs(void)
  {
    int nprocs = (int)sysconf(_SC_NPROC_ONLN);
    return nprocs > 0 ? nprocs : 1; /* ignore error silently */
  }

#elif defined(GC_LINUX_THREADS) /* && !HOST_ANDROID && !NACL */
  /* Return the number of processors. */
  STATIC int GC_get_nprocs(void)
  {
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that     */
    /* appears to be buggy in many cases.                             */
    /* We look for lines "cpu<n>" in /proc/stat.                      */
#   define PROC_STAT_BUF_SZ ((1 + MAX_MARKERS) * 100) /* should be enough */
    /* No need to read the entire /proc/stat to get maximum cpu<N> as   */
    /* - the requested lines are located at the beginning of the file;  */
    /* - the lines with cpu<N> where N > MAX_MARKERS are not needed.    */
    char stat_buf[PROC_STAT_BUF_SZ+1];
    int f;
    int result, i, len;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0) {
      WARN("Could not open /proc/stat\n", 0);
      return 1; /* assume a uniprocessor */
    }
    len = STAT_READ(f, stat_buf, sizeof(stat_buf)-1);
    /* Unlikely that we need to retry because of an incomplete read here. */
    if (len < 0) {
      WARN("Failed to read /proc/stat, errno= %" WARN_PRIdPTR "\n",
           (signed_word)errno);
      close(f);
      return 1;
    }
    stat_buf[len] = '\0'; /* to avoid potential buffer overrun by atoi() */
    close(f);

    result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."   */
        /* entry in /proc/stat.  We identify those as           */
        /* uniprocessors.                                       */

    for (i = 0; i < len - 4; ++i) {
      if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
          && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
        int cpu_no = atoi(&stat_buf[i + 4]);
        if (cpu_no >= result)
          result = cpu_no + 1;
      }
    }
    return result;
  }

#elif defined(GC_DGUX386_THREADS)
  /* Return the number of processors, or a value <= 0 if it cannot be   */
  /* determined.                                                         */
  STATIC int GC_get_nprocs(void)
  {
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0) {
       /* set -1 for error */
       numCpus = -1;
    } else {
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;
    }
    return numCpus;
  }

#elif defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) \
      || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
  STATIC int GC_get_nprocs(void)
  {
    int mib[] = {CTL_HW,HW_NCPU};
    int res;
    size_t len = sizeof(res);

    sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
    return res;
  }

#else
  /* E.g., GC_RTEMS_PTHREADS */
# define GC_get_nprocs() 1 /* not implemented */
#endif /* !GC_LINUX_THREADS && !GC_DARWIN_THREADS && ... */

#if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
  /* Some buggy Linux/arm kernels show only non-sleeping CPUs in        */
  /* /proc/stat (and /proc/cpuinfo), so another system data source is   */
  /* tried first.  Result <= 0 on error.                                */
  STATIC int GC_get_nprocs_present(void)
  {
    char stat_buf[16];
    int f;
    int len;

    f = open("/sys/devices/system/cpu/present", O_RDONLY);
    if (f < 0)
      return -1; /* cannot open the file */

    len = STAT_READ(f, stat_buf, sizeof(stat_buf));
    close(f);

    /* Recognized file format: "0\n" or "0-<max_cpu_id>\n"      */
    /* The file might probably contain a comma-separated list   */
    /* but we do not need to handle it (just silently ignore).  */
    if (len < 2 || stat_buf[0] != '0' || stat_buf[len - 1] != '\n') {
      return 0; /* read error or unrecognized content */
    } else if (len == 2) {
      return 1; /* a uniprocessor */
    } else if (stat_buf[1] != '-') {
      return 0; /* unrecognized content */
    }

    stat_buf[len - 1] = '\0'; /* terminate the string */
    return atoi(&stat_buf[2]) + 1; /* skip "0-" and parse max_cpu_num */
  }
#endif /* ARM32 && GC_LINUX_THREADS && !NACL */

#if defined(CAN_HANDLE_FORK) && defined(THREAD_SANITIZER)
# include "private/gc_pmark.h" /* for MS_NONE */

  /* Workaround for TSan which does not notice that the GC lock */
  /* is acquired in fork_prepare_proc().                        */
  GC_ATTR_NO_SANITIZE_THREAD
  static GC_bool collection_in_progress(void)
  {
    return GC_mark_state != MS_NONE;
  }
#else
# define collection_in_progress() GC_collection_in_progress()
#endif

/* We hold the GC lock.  Wait until an in-progress GC has finished.     */
/* Repeatedly releases the GC lock in order to wait.                    */
/* If wait_for_all is true, then we exit with the GC lock held and no   */
/* collection in progress; otherwise we just wait for the current GC    */
/* to finish.                                                           */
GC_INNER void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
# if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
    /* GC_lock_holder is accessed with the lock held, so there is no    */
    /* data race actually (unlike what is reported by TSan).            */
    GC_ASSERT(I_HOLD_LOCK());
# endif
  ASSERT_CANCEL_DISABLED();
# ifdef GC_DISABLE_INCREMENTAL
    (void)wait_for_all;
# else
    if (GC_incremental && collection_in_progress()) {
        word old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark     */
        /* stack, since it's about to be unmapped.                      */
        do {
            ENTER_GC();
            GC_ASSERT(!GC_in_thread_creation);
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();

            UNLOCK();
#           ifdef GC_WIN32_THREADS
              Sleep(0);
#           else
              sched_yield();
#           endif
            LOCK();
        } while (GC_incremental && collection_in_progress()
                 && (wait_for_all || old_gc_no == GC_gc_no));
    }
# endif
}
1172

1173
#ifdef CAN_HANDLE_FORK
1174

1175
  /* Procedures called before and after a fork.  The goal here is to    */
1176
  /* make it safe to call GC_malloc() in a forked child.  It is unclear */
1177
  /* that is attainable, since the single UNIX spec seems to imply that */
1178
  /* one should only call async-signal-safe functions, and we probably  */
1179
  /* cannot quite guarantee that.  But we give it our best shot.  (That */
1180
  /* same spec also implies that it is not safe to call the system      */
1181
  /* malloc between fork and exec.  Thus we're doing no worse than it.) */
1182

1183
  IF_CANCEL(static int fork_cancel_state;) /* protected by allocation lock */
1184

1185
# ifdef PARALLEL_MARK
1186
#   ifdef THREAD_SANITIZER
1187
#     if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
1188
        STATIC void GC_generic_lock(pthread_mutex_t *);
1189
#     endif
1190
      GC_ATTR_NO_SANITIZE_THREAD
1191
      static void wait_for_reclaim_atfork(void);
1192
#   else
1193
#     define wait_for_reclaim_atfork() GC_wait_for_reclaim()
1194
#   endif
1195
# endif /* PARALLEL_MARK */
1196

1197
  /* Prevent TSan false positive about the race during items removal    */
1198
  /* from GC_threads.  (The race cannot happen since only one thread    */
1199
  /* survives in the child.)                                            */
1200
# ifdef CAN_CALL_ATFORK
1201
    GC_ATTR_NO_SANITIZE_THREAD
1202
# endif
1203
  static void store_to_threads_table(int hv, GC_thread me)
10,794✔
1204
  {
1205
    GC_threads[hv] = me;
10,794✔
1206
  }
10,794✔
1207

1208
  /* Remove all entries from the GC_threads table, except the one for   */
1209
  /* the current thread.  We need to do this in the child process after */
1210
  /* a fork(), since only the current thread survives in the child.     */
1211
  STATIC void GC_remove_all_threads_but_me(void)
42✔
1212
  {
1213
    int hv;
1214
    GC_thread me = NULL;
42✔
1215
    pthread_t self = pthread_self(); /* same as in parent */
42✔
1216
#   ifndef GC_WIN32_THREADS
1217
#     define pthread_id id
1218
#   endif
1219

1220
    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
10,794✔
1221
      GC_thread p, next;
1222

1223
      for (p = GC_threads[hv]; p != NULL; p = next) {
11,634✔
1224
        next = p -> tm.next;
882✔
1225
        if (THREAD_EQUAL(p -> pthread_id, self)
882✔
1226
            && me == NULL) { /* ignore dead threads with the same id */
42✔
1227
          me = p;
42✔
1228
          p -> tm.next = NULL;
42✔
1229
        } else {
1230
#         ifdef THREAD_LOCAL_ALLOC
1231
            if (!KNOWN_FINISHED(p)) {
840✔
1232
              /* Cannot call GC_destroy_thread_local here.  The free    */
1233
              /* lists may be in an inconsistent state (as thread p may */
1234
              /* be updating one of the lists by GC_generic_malloc_many */
1235
              /* or GC_FAST_MALLOC_GRANS when fork is invoked).         */
1236
              /* This should not be a problem because the lost elements */
1237
              /* of the free lists will be collected during GC.         */
1238
              GC_remove_specific_after_fork(GC_thread_key, p -> pthread_id);
840✔
1239
            }
1240
#         endif
1241
          /* TODO: To avoid TSan hang (when updating GC_bytes_freed),   */
1242
          /* we just skip explicit freeing of GC_threads entries.       */
1243
#         if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
1244
              /* TODO: Should call mach_port_deallocate? */
1245
              GC_INTERNAL_FREE(p -> crtn);
840✔
1246
              GC_INTERNAL_FREE(p);
840✔
1247
#         endif
1248
        }
1249
      }
1250
      store_to_threads_table(hv, NULL);
10,752✔
1251
    }
1252

1253
    GC_ASSERT(me != NULL);
42✔
1254
#   ifdef GC_WIN32_THREADS
1255
      /* Update Win32 thread id and handle.     */
1256
      me -> id = thread_id_self(); /* differs from that in parent */
1257
#     ifndef MSWINCE
1258
        if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
1259
                        GetCurrentProcess(), (HANDLE *)&(me -> handle),
1260
                        0 /* dwDesiredAccess */, FALSE /* bInheritHandle */,
1261
                        DUPLICATE_SAME_ACCESS))
1262
          ABORT("DuplicateHandle failed");
1263
#     endif
1264
#   endif
1265
#   ifdef GC_DARWIN_THREADS
1266
      /* Update thread Id after fork (it is OK to call  */
1267
      /* GC_destroy_thread_local and GC_free_inner      */
1268
      /* before update).                                */
1269
      me -> mach_thread = mach_thread_self();
1270
#   endif
1271
#   ifdef USE_TKILL_ON_ANDROID
1272
      me -> kernel_id = gettid();
1273
#   endif
1274

1275
    /* Put "me" back to GC_threads.     */
1276
    store_to_threads_table(THREAD_TABLE_INDEX(me -> id), me);
42✔
1277

1278
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_CUSTOM_SPECIFIC)
1279
      /* Some TLS implementations (e.g., on Cygwin) might be not        */
1280
      /* fork-friendly, so we re-assign thread-local pointer to 'tlfs'  */
1281
      /* for safety instead of the assertion check (again, it is OK to  */
1282
      /* call GC_destroy_thread_local and GC_free_inner before).        */
      {
        int res = GC_setspecific(GC_thread_key, &me->tlfs);

        if (COVERT_DATAFLOW(res) != 0)
          ABORT("GC_setspecific failed (in child)");
      }
#   endif
#   undef pthread_id
  }

  /* Called before a fork().    */
# if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
    /* GC_lock_holder is updated safely (no data race actually).        */
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void fork_prepare_proc(void)
  {
    /* Acquire all relevant locks, so that after releasing the locks    */
    /* the child will see a consistent state in which monitor           */
    /* invariants hold.  Unfortunately, we can't acquire libc locks     */
    /* we might need, and there seems to be no guarantee that libc      */
    /* must install a suitable fork handler.                            */
    /* Wait for an ongoing GC to finish, since we can't finish it in    */
    /* the (one remaining thread in) the child.                         */

      LOCK();
      DISABLE_CANCEL(fork_cancel_state);
                /* Following waits may include cancellation points. */
#     ifdef PARALLEL_MARK
        if (GC_parallel)
          wait_for_reclaim_atfork();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     ifdef PARALLEL_MARK
        if (GC_parallel) {
#         if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
             && defined(CAN_CALL_ATFORK)
            /* Prevent TSan false positive about the data race  */
            /* when updating GC_mark_lock_holder.               */
            GC_generic_lock(&mark_mutex);
#         else
            GC_acquire_mark_lock();
#         endif
        }
#     endif
      GC_acquire_dirty_lock();
  }

  /* Called in parent after a fork() (even if the latter failed).       */
# if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void fork_parent_proc(void)
  {
    GC_release_dirty_lock();
#   ifdef PARALLEL_MARK
      if (GC_parallel) {
#       if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
           && defined(CAN_CALL_ATFORK)
          /* To match that in fork_prepare_proc. */
          (void)pthread_mutex_unlock(&mark_mutex);
#       else
          GC_release_mark_lock();
#       endif
      }
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
  }

  /* Called in child after a fork().    */
# if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void fork_child_proc(void)
  {
    GC_release_dirty_lock();
#   ifdef PARALLEL_MARK
      if (GC_parallel) {
#       if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
           && defined(CAN_CALL_ATFORK)
          (void)pthread_mutex_unlock(&mark_mutex);
#       else
          GC_release_mark_lock();
#       endif
        /* Turn off parallel marking in the child, since we are probably  */
        /* just going to exec, and we would have to restart mark threads. */
        GC_parallel = FALSE;
      }
#     ifdef THREAD_SANITIZER
        /* TSan does not support thread creation in the child process.  */
        GC_available_markers_m1 = 0;
#     endif
#   endif
    /* Clean up the thread table, so that just our thread is left.      */
    GC_remove_all_threads_but_me();
#   ifndef GC_DISABLE_INCREMENTAL
      GC_dirty_update_child();
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
    /* Even though after a fork the child only inherits the single      */
    /* thread that called the fork(), if another thread in the parent   */
    /* was attempting to lock the mutex while it was held across        */
    /* fork_prepare_proc(), the mutex will be left in an inconsistent   */
    /* state in the child after the UNLOCK.  This is the case, at       */
    /* least, on Mac OS X and leads to an unusable GC in the child,     */
    /* which will block when attempting to perform any GC operation     */
    /* that acquires the allocation mutex.                              */
#   if defined(USE_PTHREAD_LOCKS) && !defined(GC_WIN32_THREADS)
      GC_ASSERT(I_DONT_HOLD_LOCK());
      /* Reinitialize the mutex.  It should be safe since we are        */
      /* running this in the child which only inherits a single thread. */
      /* mutex_destroy() may return EBUSY, which makes no sense, but    */
      /* that is why the reinitialization is needed.                    */
      /* Note: excluded for Cygwin as it does not seem to be needed.    */
      (void)pthread_mutex_destroy(&GC_allocate_ml);
      /* TODO: Probably some targets might need the default mutex       */
      /* attribute to be passed instead of NULL.                        */
      if (0 != pthread_mutex_init(&GC_allocate_ml, NULL))
        ABORT("pthread_mutex_init failed (in child)");
#   endif
  }

  /* Routines for fork handling by client (no-op if pthread_atfork works). */
  GC_API void GC_CALL GC_atfork_prepare(void)
  {
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
#   if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
      if (GC_auto_incremental) {
        GC_ASSERT(0 == GC_handle_fork);
        ABORT("Unable to fork while mprotect_thread is running");
      }
#   endif
    if (GC_handle_fork <= 0)
      fork_prepare_proc();
  }

  GC_API void GC_CALL GC_atfork_parent(void)
  {
    if (GC_handle_fork <= 0)
      fork_parent_proc();
  }

  GC_API void GC_CALL GC_atfork_child(void)
  {
    if (GC_handle_fork <= 0)
      fork_child_proc();
  }

  /* Prepare for forks if requested.    */
  GC_INNER_WIN32THREAD void GC_setup_atfork(void)
  {
    if (GC_handle_fork) {
#     ifdef CAN_CALL_ATFORK
        if (pthread_atfork(fork_prepare_proc, fork_parent_proc,
                           fork_child_proc) == 0) {
          /* Handlers successfully registered.  */
          GC_handle_fork = 1;
        } else
#     endif
      /* else */ if (GC_handle_fork != -1)
        ABORT("pthread_atfork failed");
    }
  }

#endif /* CAN_HANDLE_FORK */
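
/* Illustrative sketch (an editor's addition, not part of the file):    */
/* a client that arranged to handle forks itself, e.g. by calling       */
/* GC_set_handle_fork(-1) before GC_INIT(), brackets fork() with the    */
/* GC_atfork_* routines above roughly as follows.                       */
#if 0
  pid_t pid;

  GC_atfork_prepare();
  pid = fork();
  if (0 == pid) {
    GC_atfork_child();          /* in the child: repair collector state */
    /* ... child work, typically followed by an exec ... */
  } else {
    GC_atfork_parent();         /* in the parent (even if fork failed)  */
  }
#endif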

#ifdef INCLUDE_LINUX_THREAD_DESCR
  __thread int GC_dummy_thread_local;
#endif

#ifdef PARALLEL_MARK
# ifndef GC_WIN32_THREADS
    static void setup_mark_lock(void);
# endif

  GC_INNER_WIN32THREAD unsigned GC_required_markers_cnt = 0;
                        /* The default value (0) means the number of    */
                        /* markers should be selected automatically.    */

  GC_API void GC_CALL GC_set_markers_count(unsigned markers)
  {
    GC_required_markers_cnt = markers < MAX_MARKERS ? markers : MAX_MARKERS;
  }
#endif /* PARALLEL_MARK */
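
/* Illustrative sketch (an editor's addition): the marker count can be  */
/* pinned before collector initialization; setting the GC_MARKERS       */
/* environment variable has the equivalent effect.                      */
#if 0
  GC_set_markers_count(2);      /* request two parallel marker threads  */
  GC_INIT();
#endif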

GC_INNER GC_bool GC_in_thread_creation = FALSE;
                                /* Protected by allocation lock. */

GC_INNER_WIN32THREAD void GC_record_stack_base(GC_stack_context_t crtn,
                                               const struct GC_stack_base *sb)
{
# if !defined(GC_DARWIN_THREADS) && !defined(GC_WIN32_THREADS)
    crtn -> stack_ptr = (ptr_t)sb->mem_base;
# endif
  if ((crtn -> stack_end = (ptr_t)sb->mem_base) == NULL)
    ABORT("Bad stack base in GC_register_my_thread");
# ifdef IA64
    crtn -> backing_store_end = (ptr_t)sb->reg_base;
# elif defined(I386) && defined(GC_WIN32_THREADS)
    crtn -> initial_stack_base = (ptr_t)sb->mem_base;
# endif
}

#ifndef GC_WIN32_THREADS

STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
                                             thread_id_t self_id)
{
  GC_thread me;

  GC_ASSERT(I_HOLD_LOCK());
  me = GC_new_thread(self_id);
  me -> id = self_id;
# ifdef GC_DARWIN_THREADS
    me -> mach_thread = mach_thread_self();
# endif
  GC_record_stack_base(me -> crtn, sb);
  return me;
}

  STATIC int GC_nprocs = 1;
                        /* Number of processors.  We may not have       */
                        /* access to all of them, but this is as good   */
                        /* a guess as any ...                           */

GC_INNER void GC_thr_init(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(!GC_thr_initialized);
  GC_ASSERT((word)(&GC_threads) % sizeof(word) == 0);
# ifdef GC_ASSERTIONS
    GC_thr_initialized = TRUE;
# endif
# ifdef CAN_HANDLE_FORK
    GC_setup_atfork();
# endif

# ifdef INCLUDE_LINUX_THREAD_DESCR
    /* Explicitly register the region including the address     */
    /* of a thread local variable.  This should include thread  */
    /* locals for the main thread, except for those allocated   */
    /* in response to dlopen calls.                             */
    {
      ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local);
      ptr_t main_thread_start, main_thread_end;
      if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
                                &main_thread_end)) {
        ABORT("Failed to find mapping for main thread thread locals");
      } else {
        /* main_thread_start and main_thread_end are initialized.       */
        GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
      }
    }
# endif

  /* Set GC_nprocs and GC_available_markers_m1. */
  {
    char * nprocs_string = GETENV("GC_NPROCS");
    GC_nprocs = -1;
    if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
  }
  if (GC_nprocs <= 0
#     if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
        && (GC_nprocs = GC_get_nprocs_present()) <= 1
                                /* Workaround for some Linux/arm kernels */
#     endif
      )
  {
    GC_nprocs = GC_get_nprocs();
  }
  if (GC_nprocs <= 0) {
    WARN("GC_get_nprocs() returned %" WARN_PRIdPTR "\n",
         (signed_word)GC_nprocs);
    GC_nprocs = 2; /* assume dual-core */
#   ifdef PARALLEL_MARK
      GC_available_markers_m1 = 0; /* but use only one marker */
#   endif
  } else {
#   ifdef PARALLEL_MARK
      {
        char * markers_string = GETENV("GC_MARKERS");
        int markers = GC_required_markers_cnt;

        if (markers_string != NULL) {
          markers = atoi(markers_string);
          if (markers <= 0 || markers > MAX_MARKERS) {
            WARN("Too big or invalid number of mark threads: %" WARN_PRIdPTR
                 "; using maximum threads\n", (signed_word)markers);
            markers = MAX_MARKERS;
          }
        } else if (0 == markers) {
          /* Unless the client sets the desired number of       */
          /* parallel markers, it is determined based on the    */
          /* number of CPU cores.                               */
          markers = GC_nprocs;
#         if defined(GC_MIN_MARKERS) && !defined(CPPCHECK)
            /* This is primarily for targets without getenv().  */
            if (markers < GC_MIN_MARKERS)
              markers = GC_MIN_MARKERS;
#         endif
          if (markers > MAX_MARKERS)
            markers = MAX_MARKERS; /* silently limit the value */
        }
        GC_available_markers_m1 = markers - 1;
      }
#   endif
  }
  GC_COND_LOG_PRINTF("Number of processors: %d\n", GC_nprocs);

# if defined(BASE_ATOMIC_OPS_EMULATED) && defined(SIGNAL_BASED_STOP_WORLD)
    /* Ensure the process is running on just one CPU core.      */
    /* This is needed because the AO primitives emulated with   */
    /* locks cannot be used inside signal handlers.             */
    {
      cpu_set_t mask;
      int cpu_set_cnt = 0;
      int cpu_lowest_set = 0;
      int i = GC_nprocs > 1 ? GC_nprocs : 2; /* check at least 2 cores */

      if (sched_getaffinity(0 /* current process */,
                            sizeof(mask), &mask) == -1)
        ABORT_ARG1("sched_getaffinity failed", ": errno= %d", errno);
      while (i-- > 0)
        if (CPU_ISSET(i, &mask)) {
          cpu_lowest_set = i;
          cpu_set_cnt++;
        }
      if (0 == cpu_set_cnt)
        ABORT("sched_getaffinity returned empty mask");
      if (cpu_set_cnt > 1) {
        CPU_ZERO(&mask);
        CPU_SET(cpu_lowest_set, &mask); /* select just one CPU */
        if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
          ABORT_ARG1("sched_setaffinity failed", ": errno= %d", errno);
        WARN("CPU affinity mask is set to %p\n", (word)1 << cpu_lowest_set);
      }
    }
# endif /* BASE_ATOMIC_OPS_EMULATED */

# ifndef GC_DARWIN_THREADS
    GC_stop_init();
# endif

# ifdef PARALLEL_MARK
    if (GC_available_markers_m1 <= 0) {
      /* Disable parallel marking.      */
      GC_parallel = FALSE;
      GC_COND_LOG_PRINTF(
                "Single marker thread, turning off parallel marking\n");
    } else {
      setup_mark_lock();
    }
# endif

  /* Add the initial thread, so we can stop it. */
  {
    struct GC_stack_base sb;
    GC_thread me;
    GC_stack_context_t crtn;

    sb.mem_base = GC_stackbottom;
    GC_ASSERT(sb.mem_base != NULL);
#   ifdef IA64
      sb.reg_base = GC_register_stackbottom;
#   elif defined(E2K)
      sb.reg_base = NULL;
#   endif
    GC_ASSERT(NULL == GC_self_thread_inner());
    me = GC_register_my_thread_inner(&sb, thread_id_self());
    me -> flags = DETACHED;
    /* Copy the alt-stack information if set. */
    crtn = me -> crtn;
    crtn -> normstack = (ptr_t)main_normstack;
    crtn -> normstack_size = main_normstack_size;
    crtn -> altstack = (ptr_t)main_altstack;
    crtn -> altstack_size = main_altstack_size;

#   ifdef CPPCHECK
      GC_noop1((unsigned char)(me -> flags_pad[0]));
#     if defined(THREAD_SANITIZER) && defined(SIGNAL_BASED_STOP_WORLD)
        GC_noop1((unsigned char)(crtn -> dummy[0]));
#     endif
#     ifndef GC_NO_FINALIZATION
        GC_noop1((unsigned char)(crtn -> fnlz_pad[0]));
#     endif
#   endif
  }
}

#endif /* !GC_WIN32_THREADS */

/* Perform all initializations, including those that may require        */
/* allocation, e.g. initialize thread local free lists if used.         */
/* Must be called before a thread is created.                           */
GC_INNER void GC_init_parallel(void)
{
# ifdef THREAD_LOCAL_ALLOC
    GC_thread me;

    GC_ASSERT(GC_is_initialized);
    LOCK();
    me = GC_self_thread_inner();
    GC_init_thread_local(&me->tlfs);
    UNLOCK();
# endif
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads) {
      set_need_to_lock();
        /* Cannot intercept thread creation.  Hence we don't know if    */
        /* other threads exist.  However, client is not allowed to      */
        /* create other threads before collector initialization.        */
        /* Thus it's OK not to lock before this.                        */
    }
# endif
}

#if !defined(GC_NO_PTHREAD_SIGMASK) && defined(GC_PTHREADS)
  GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
                                        sigset_t *oset)
  {
#   ifdef GC_WIN32_THREADS
      /* pthreads-win32 does not support sigmask.       */
      /* So, nothing required here...                   */
#   else
      sigset_t fudged_set;

      INIT_REAL_SYMS();
      if (EXPECT(set != NULL, TRUE)
          && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        int sig_suspend = GC_get_suspend_signal();

        fudged_set = *set;
        GC_ASSERT(sig_suspend >= 0);
        if (sigdelset(&fudged_set, sig_suspend) != 0)
          ABORT("sigdelset failed");
        set = &fudged_set;
      }
#   endif
    return REAL_FUNC(pthread_sigmask)(how, set, oset);
  }
#endif /* !GC_NO_PTHREAD_SIGMASK */
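
/* Illustrative sketch (an editor's addition): thanks to the wrapper    */
/* above, a client may block "all" signals without breaking the         */
/* signal-based stop-the-world mechanism, as the suspend signal is      */
/* silently removed from the requested mask.                            */
#if 0
  sigset_t set;

  sigfillset(&set);
  (void)pthread_sigmask(SIG_BLOCK, &set, NULL);
                        /* the GC suspend signal stays unblocked */
#endif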

/* Wrapper for functions that are likely to block for an appreciable    */
/* length of time.                                                      */

static GC_bool do_blocking_enter(GC_thread me)
{
#   if defined(SPARC) || defined(IA64)
        ptr_t bs_hi = GC_save_regs_in_stack();
        /* TODO: regs saving already done by GC_with_callee_saves_pushed */
#   elif defined(E2K)
        size_t stack_size;
#   endif
    GC_stack_context_t crtn = me -> crtn;
    GC_bool topOfStackUnset = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((me -> flags & DO_BLOCKING) == 0);
#   ifdef SPARC
        crtn -> stack_ptr = bs_hi;
#   else
        crtn -> stack_ptr = GC_approx_sp();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
        if (NULL == crtn -> topOfStack) {
            /* GC_do_blocking_inner is not called recursively,  */
            /* so topOfStack should be computed now.            */
            topOfStackUnset = TRUE;
            crtn -> topOfStack = GC_FindTopOfStack(0);
        }
#   endif
#   ifdef IA64
        crtn -> backing_store_ptr = bs_hi;
#   elif defined(E2K)
        GC_ASSERT(NULL == crtn -> backing_store_end);
        stack_size = GC_alloc_and_get_procedure_stack(
                                        &(crtn -> backing_store_end));
        crtn -> backing_store_ptr = crtn -> backing_store_end + stack_size;
#   endif
    me -> flags |= DO_BLOCKING;
    /* Save context here if we want to support precise stack marking.   */
    return topOfStackUnset;
}

static void do_blocking_leave(GC_thread me, GC_bool topOfStackUnset)
{
    GC_ASSERT(I_HOLD_LOCK());
    me -> flags &= ~DO_BLOCKING;
#   ifdef E2K
      {
        GC_stack_context_t crtn = me -> crtn;

        GC_ASSERT(crtn -> backing_store_end != NULL);
        /* Note that the value of backing_store_end here may differ     */
        /* from the one stored in this function previously.             */
        GC_INTERNAL_FREE(crtn -> backing_store_end);
        crtn -> backing_store_ptr = NULL;
        crtn -> backing_store_end = NULL;
      }
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
        if (topOfStackUnset)
          me -> crtn -> topOfStack = NULL; /* make it unset again */
#   else
        (void)topOfStackUnset;
#   endif
}

GC_INNER void GC_do_blocking_inner(ptr_t data, void *context)
{
    struct blocking_data *d = (struct blocking_data *)data;
    GC_thread me;
    GC_bool topOfStackUnset;

    UNUSED_ARG(context);
    LOCK();
    me = GC_self_thread_inner();
    topOfStackUnset = do_blocking_enter(me);
    UNLOCK();

    d -> client_data = (d -> fn)(d -> client_data);

    LOCK();   /* This will block if the world is stopped.       */
#   ifdef LINT2
      {
#        ifdef GC_ASSERTIONS
           GC_thread saved_me = me;
#        endif

         /* The pointer to the GC thread descriptor should not be   */
         /* changed while the thread is registered but a static     */
         /* analysis tool might complain that this pointer value    */
         /* (obtained in the first locked section) is unreliable in */
         /* the second locked section.                              */
         me = GC_self_thread_inner();
         GC_ASSERT(me == saved_me);
      }
#   endif
#   if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
      /* Note: this code cannot be moved into do_blocking_leave()   */
      /* otherwise there could be a static analysis tool warning    */
      /* (false positive) about unlock without a matching lock.     */
      while (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) {
        word suspend_cnt = (word)(me -> ext_suspend_cnt);
                        /* read suspend counter (number) before unlocking */

        UNLOCK();
        GC_suspend_self_inner(me, suspend_cnt);
        LOCK();
      }
#   endif
    do_blocking_leave(me, topOfStackUnset);
    UNLOCK();
}
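
/* Illustrative sketch (an editor's addition): the public entry point   */
/* GC_do_blocking() leads to GC_do_blocking_inner() above.  A client    */
/* wraps a potentially long system call so that stop-the-world pauses   */
/* need not wait for it; do_io and struct io_args are hypothetical      */
/* names, and the callback must not touch the collected heap.           */
#if 0
  struct io_args { int fd; void *buf; size_t len; ssize_t result; };

  static void *do_io(void *arg)
  {
    struct io_args *a = (struct io_args *)arg;

    a -> result = read(a -> fd, a -> buf, a -> len); /* may block long */
    return arg;
  }

  /* ... in client code: */
  (void)GC_do_blocking(do_io, &args);
#endif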

#if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
  /* Similar to GC_do_blocking_inner() but assuming the GC lock is held */
  /* and fn is GC_suspend_self_inner.                                   */
  GC_INNER void GC_suspend_self_blocked(ptr_t thread_me, void *context)
  {
    GC_thread me = (GC_thread)thread_me;
    GC_bool topOfStackUnset;

    UNUSED_ARG(context);
    GC_ASSERT(I_HOLD_LOCK());
    topOfStackUnset = do_blocking_enter(me);
    while ((me -> ext_suspend_cnt & 1) != 0) {
      word suspend_cnt = (word)(me -> ext_suspend_cnt);

      UNLOCK();
      GC_suspend_self_inner(me, suspend_cnt);
      LOCK();
    }
    do_blocking_leave(me, topOfStackUnset);
  }
#endif /* GC_ENABLE_SUSPEND_THREAD */
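
/* Illustrative sketch (an editor's addition): assuming the build       */
/* provides GC_suspend_thread() and GC_resume_thread(), a registered    */
/* thread may be parked (it ends up waiting in the code above) and      */
/* resumed later; "target" is a hypothetical pthread_t value.           */
#if 0
  GC_suspend_thread(target);
  /* ... inspect the target thread state while it is parked ... */
  GC_resume_thread(target);
#endif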

GC_API void GC_CALL GC_set_stackbottom(void *gc_thread_handle,
                                       const struct GC_stack_base *sb)
{
    GC_thread t = (GC_thread)gc_thread_handle;
    GC_stack_context_t crtn;

    GC_ASSERT(sb -> mem_base != NULL);
    if (!EXPECT(GC_is_initialized, TRUE)) {
      GC_ASSERT(NULL == t);
      /* Alter the stack bottom of the primordial thread.       */
      GC_stackbottom = (char*)(sb -> mem_base);
#     ifdef IA64
        GC_register_stackbottom = (ptr_t)(sb -> reg_base);
#     endif
      return;
    }

    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == t) /* current thread? */
      t = GC_self_thread_inner();
    GC_ASSERT(!KNOWN_FINISHED(t));
    crtn = t -> crtn;
    GC_ASSERT((t -> flags & DO_BLOCKING) == 0
              && NULL == crtn -> traced_stack_sect); /* for now */

    crtn -> stack_end = (ptr_t)(sb -> mem_base);
#   ifdef IA64
      crtn -> backing_store_end = (ptr_t)(sb -> reg_base);
#   endif
#   ifdef GC_WIN32_THREADS
      /* Reset the known minimum (hottest address in the stack). */
      crtn -> last_stack_min = ADDR_LIMIT;
#   endif
}

GC_API void * GC_CALL GC_get_my_stackbottom(struct GC_stack_base *sb)
{
    GC_thread me;
    GC_stack_context_t crtn;

    LOCK();
    me = GC_self_thread_inner();
    /* The thread is assumed to be registered.  */
    crtn = me -> crtn;
    sb -> mem_base = crtn -> stack_end;
#   ifdef IA64
      sb -> reg_base = crtn -> backing_store_end;
#   endif
#   ifdef E2K
      sb -> reg_base = NULL;
#   endif
    UNLOCK();
    return (void *)me; /* gc_thread_handle */
}
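
/* Illustrative sketch (an editor's addition): a client implementing    */
/* its own coroutines can save the collector's notion of the stack      */
/* bottom and update it on every stack switch; new_stack_bottom is a    */
/* hypothetical pointer to the bottom of the stack being switched to.   */
#if 0
  struct GC_stack_base sb;
  void *me = GC_get_my_stackbottom(&sb);  /* handle of this thread */

  /* ... switch stacks, then inform the collector: */
  sb.mem_base = new_stack_bottom;
  GC_set_stackbottom(me, &sb);
#endif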

/* GC_call_with_gc_active() has the opposite to GC_do_blocking()        */
/* functionality.  It might be called from a user function invoked by   */
/* GC_do_blocking() to temporarily re-allow calling any GC function     */
/* and/or manipulating pointers to the garbage collected heap.          */
GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
                                             void * client_data)
{
    struct GC_traced_stack_sect_s stacksect;
    GC_thread me;
    GC_stack_context_t crtn;
    ptr_t stack_end;
#   ifdef E2K
      size_t stack_size;
#   endif

    LOCK();   /* This will block if the world is stopped.       */
    me = GC_self_thread_inner();
    crtn = me -> crtn;

    /* Adjust our stack bottom value (this is needed unless     */
    /* GC_get_stack_base() was used and returned GC_SUCCESS).   */
    stack_end = crtn -> stack_end; /* read of a volatile field */
    GC_ASSERT(stack_end != NULL);
    if ((word)stack_end HOTTER_THAN (word)(&stacksect)) {
      crtn -> stack_end = (ptr_t)(&stacksect);
#     if defined(I386) && defined(GC_WIN32_THREADS)
        crtn -> initial_stack_base = (ptr_t)(&stacksect);
#     endif
    }

    if ((me -> flags & DO_BLOCKING) == 0) {
      /* We are not inside GC_do_blocking() - do nothing more.  */
      UNLOCK();
      client_data = fn(client_data);
      /* Prevent treating the above as a tail call.     */
      GC_noop1(COVERT_DATAFLOW(&stacksect));
      return client_data; /* result */
    }

#   if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
      while (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) {
        word suspend_cnt = (word)(me -> ext_suspend_cnt);
        UNLOCK();
        GC_suspend_self_inner(me, suspend_cnt);
        LOCK();
        GC_ASSERT(me -> crtn == crtn);
      }
#   endif

    /* Setup new "stack section".       */
    stacksect.saved_stack_ptr = crtn -> stack_ptr;
#   ifdef IA64
      /* This is the same as in GC_call_with_stack_base().      */
      stacksect.backing_store_end = GC_save_regs_in_stack();
      /* Unnecessarily flushes register stack,          */
      /* but that probably doesn't hurt.                */
      stacksect.saved_backing_store_ptr = crtn -> backing_store_ptr;
#   elif defined(E2K)
      GC_ASSERT(crtn -> backing_store_end != NULL);
      GC_INTERNAL_FREE(crtn -> backing_store_end);
      crtn -> backing_store_ptr = NULL;
      crtn -> backing_store_end = NULL;
#   endif
    stacksect.prev = crtn -> traced_stack_sect;
    me -> flags &= ~DO_BLOCKING;
    crtn -> traced_stack_sect = &stacksect;

    UNLOCK();
    client_data = fn(client_data);
    GC_ASSERT((me -> flags & DO_BLOCKING) == 0);

    /* Restore original "stack section".        */
#   ifdef E2K
      (void)GC_save_regs_in_stack();
#   endif
    LOCK();
    GC_ASSERT(me -> crtn == crtn);
    GC_ASSERT(crtn -> traced_stack_sect == &stacksect);
#   ifdef CPPCHECK
      GC_noop1((word)(crtn -> traced_stack_sect));
#   endif
    crtn -> traced_stack_sect = stacksect.prev;
#   ifdef IA64
      crtn -> backing_store_ptr = stacksect.saved_backing_store_ptr;
#   elif defined(E2K)
      GC_ASSERT(NULL == crtn -> backing_store_end);
      stack_size = GC_alloc_and_get_procedure_stack(
                                        &(crtn -> backing_store_end));
      crtn -> backing_store_ptr = crtn -> backing_store_end + stack_size;
#   endif
    me -> flags |= DO_BLOCKING;
    crtn -> stack_ptr = stacksect.saved_stack_ptr;
    UNLOCK();

    return client_data; /* result */
}
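
/* Illustrative sketch (an editor's addition): code executing inside a  */
/* GC_do_blocking() callback can re-enter "GC-active" mode for a        */
/* bounded region; heap allocation is legal again only inside fn.       */
#if 0
  static void *with_heap_access(void *arg)
  {
    return GC_MALLOC(64);       /* allocation is safe here */
  }

  /* ... called from within a GC_do_blocking() callback: */
  void *obj = GC_call_with_gc_active(with_heap_access, NULL);
#endif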

STATIC void GC_unregister_my_thread_inner(GC_thread me)
{
    GC_ASSERT(I_HOLD_LOCK());
#   ifdef DEBUG_THREADS
      GC_log_printf("Unregistering thread %p, gc_thread= %p, n_threads= %d\n",
                    (void *)(signed_word)(me -> id), (void *)me,
                    GC_count_threads());
#   endif
    GC_ASSERT(!KNOWN_FINISHED(me));
#   if defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(GC_getspecific(GC_thread_key) == &me->tlfs);
      GC_destroy_thread_local(&me->tlfs);
#   endif
#   ifdef NACL
      GC_nacl_shutdown_gc_thread();
#   endif
#   ifdef GC_PTHREADS
#     if defined(GC_HAVE_PTHREAD_EXIT) || !defined(GC_NO_PTHREAD_CANCEL)
        /* Handle DISABLED_GC flag which is set by the  */
        /* intercepted pthread_cancel or pthread_exit.  */
        if ((me -> flags & DISABLED_GC) != 0) {
          GC_dont_gc--;
        }
#     endif
      if ((me -> flags & DETACHED) == 0) {
          me -> flags |= FINISHED;
      } else
#   endif
    /* else */ {
      GC_delete_thread(me);
    }
#   if defined(THREAD_LOCAL_ALLOC)
      /* It is required to call remove_specific defined in specific.c. */
      GC_remove_specific(GC_thread_key);
#   endif
}

GC_API int GC_CALL GC_unregister_my_thread(void)
{
    GC_thread me;
    IF_CANCEL(int cancel_state;)

    /* Client should not unregister the thread explicitly if it */
    /* is registered by DllMain, except for the main thread.    */
#   if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
      GC_ASSERT(!GC_win32_dll_threads
                || GC_main_thread_id == thread_id_self());
#   endif

    LOCK();
    DISABLE_CANCEL(cancel_state);
    /* Wait for any GC that may be marking from our stack to    */
    /* complete before we remove this thread.                   */
    GC_wait_for_gc_completion(FALSE);
    me = GC_self_thread_inner();
#   ifdef DEBUG_THREADS
        GC_log_printf(
                "Called GC_unregister_my_thread on %p, gc_thread= %p\n",
                (void *)(signed_word)thread_id_self(), (void *)me);
#   endif
    GC_ASSERT(THREAD_ID_EQUAL(me -> id, thread_id_self()));
    GC_unregister_my_thread_inner(me);
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    return GC_SUCCESS;
}

#if !defined(GC_NO_PTHREAD_CANCEL) && defined(GC_PTHREADS)
  /* We should deal with the fact that apparently on Solaris and,       */
  /* probably, on some Linux systems we can't collect while a thread    */
  /* is exiting, since signals aren't handled properly.  This currently */
  /* gives rise to deadlocks.  The only workaround seen is to intercept */
  /* pthread_cancel() and pthread_exit(), and disable the collections   */
  /* until the thread exit handler is called.  That's ugly, because we  */
  /* risk growing the heap unnecessarily.  But it seems that we don't   */
  /* really have an option, in that the process is not in a fully       */
  /* functional state while a thread is exiting.                        */
  GC_API int WRAP_FUNC(pthread_cancel)(pthread_t thread)
  {
#   ifdef CANCEL_SAFE
      GC_thread t;
#   endif

    INIT_REAL_SYMS();
#   ifdef CANCEL_SAFE
      LOCK();
      t = GC_lookup_by_pthread(thread);
      /* We test DISABLED_GC because pthread_exit could be called at    */
      /* the same time.  (If t is NULL then pthread_cancel should       */
      /* return ESRCH.)                                                 */
      if (t != NULL && (t -> flags & DISABLED_GC) == 0) {
        t -> flags |= DISABLED_GC;
        GC_dont_gc++;
      }
      UNLOCK();
#   endif
    return REAL_FUNC(pthread_cancel)(thread);
  }
#endif /* !GC_NO_PTHREAD_CANCEL */

#ifdef GC_HAVE_PTHREAD_EXIT
  GC_API GC_PTHREAD_EXIT_ATTRIBUTE void WRAP_FUNC(pthread_exit)(void *retval)
  {
    GC_thread me;

    INIT_REAL_SYMS();
    LOCK();
    me = GC_self_thread_inner();
    /* We test DISABLED_GC because someone else could call    */
    /* pthread_cancel at the same time.                       */
    if (me != NULL && (me -> flags & DISABLED_GC) == 0) {
      me -> flags |= DISABLED_GC;
      GC_dont_gc++;
    }
    UNLOCK();

    REAL_FUNC(pthread_exit)(retval);
  }
#endif /* GC_HAVE_PTHREAD_EXIT */

GC_API void GC_CALL GC_allow_register_threads(void)
{
  /* Check GC is initialized and the current thread is registered.  */
  GC_ASSERT(GC_self_thread() != NULL);

  INIT_REAL_SYMS(); /* to initialize symbols while single-threaded */
  GC_start_mark_threads();
  set_need_to_lock();
}

GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
{
    GC_thread me;

    if (GC_need_to_lock == FALSE)
        ABORT("Explicit thread registration was not previously enabled");

    /* We lock here, since we want to wait for an ongoing GC.   */
    LOCK();
    me = GC_self_thread_inner();
    if (EXPECT(NULL == me, TRUE)) {
      me = GC_register_my_thread_inner(sb, thread_id_self());
#     ifdef GC_PTHREADS
#       ifdef CPPCHECK
          GC_noop1(me -> flags);
#       endif
        /* Treat as detached, since we do not need to worry about       */
        /* pointer results.                                             */
        me -> flags |= DETACHED;
#     else
        (void)me;
#     endif
    } else
#   ifdef GC_PTHREADS
      /* else */ if (KNOWN_FINISHED(me)) {
        /* This code is executed when a thread is registered from the   */
        /* client thread key destructor.                                */
#       ifdef NACL
          GC_nacl_initialize_gc_thread(me);
#       endif
#       ifdef GC_DARWIN_THREADS
          /* Reinitialize mach_thread to avoid thread_suspend fail      */
          /* with MACH_SEND_INVALID_DEST error.                         */
          me -> mach_thread = mach_thread_self();
#       endif
        GC_record_stack_base(me -> crtn, sb);
        me -> flags &= ~FINISHED; /* but not DETACHED */
      } else
#   endif
    /* else */ {
        UNLOCK();
        return GC_DUPLICATE;
    }

#   ifdef THREAD_LOCAL_ALLOC
      GC_init_thread_local(&me->tlfs);
#   endif
#   ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
      /* Since this could be executed from a thread destructor, */
      /* our signals might already be blocked.                  */
      GC_unblock_gc_signals();
#   endif
#   if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
      if (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) {
        GC_with_callee_saves_pushed(GC_suspend_self_blocked, (ptr_t)me);
      }
#   endif
    UNLOCK();
    return GC_SUCCESS;
}
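
/* Illustrative sketch (an editor's addition): a thread created by      */
/* foreign code (so pthread_create was not intercepted) registers       */
/* itself before touching the collected heap and unregisters on exit;   */
/* GC_allow_register_threads() must have been called earlier by a       */
/* registered thread.                                                   */
#if 0
  struct GC_stack_base sb;

  if (GC_get_stack_base(&sb) == GC_SUCCESS
      && GC_register_my_thread(&sb) == GC_SUCCESS) {
    /* ... use GC_MALLOC() and friends ... */
    (void)GC_unregister_my_thread();
  }
#endif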

#if defined(GC_PTHREADS) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)

  /* Called at thread exit.  Never called for main thread.      */
  /* That is OK, since it results in at most a tiny one-time    */
  /* leak.  And the linuxthreads implementation does not        */
  /* reclaim the primordial (main) thread resources or id       */
  /* anyway.                                                    */
  GC_INNER_PTHRSTART void GC_thread_exit_proc(void *arg)
  {
    GC_thread me = (GC_thread)arg;
    IF_CANCEL(int cancel_state;)

#   ifdef DEBUG_THREADS
        GC_log_printf("Called GC_thread_exit_proc on %p, gc_thread= %p\n",
                      (void *)(signed_word)(me -> id), (void *)me);
#   endif
    LOCK();
    DISABLE_CANCEL(cancel_state);
    GC_wait_for_gc_completion(FALSE);
    GC_unregister_my_thread_inner(me);
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
  }

  GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
  {
    int result;
    GC_thread t;

    INIT_REAL_SYMS();
#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p is joining thread %p\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()),
                    (void *)GC_PTHREAD_PTRVAL(thread));
#   endif

    /* After the join, thread id may have been recycled.                */
    LOCK();
    t = (GC_thread)COVERT_DATAFLOW(GC_lookup_by_pthread(thread));
      /* This is guaranteed to be the intended one, since the thread id */
      /* cannot have been recycled by pthreads.                         */
    UNLOCK();

    result = REAL_FUNC(pthread_join)(thread, retval);
#   if defined(GC_FREEBSD_THREADS)
      /* On FreeBSD, the wrapped pthread_join() sometimes returns       */
      /* (what appears to be) a spurious EINTR which caused the test    */
      /* and real code to fail gratuitously.  Having looked at system   */
      /* pthread library source code, I see how such return code value  */
      /* may be generated.  In one path of the code, pthread_join just  */
      /* returns the errno setting of the thread being joined - this    */
      /* does not match the POSIX specification or the local man pages. */
      /* Thus, I have taken the liberty to catch this one spurious      */
      /* return value.                                                  */
      if (EXPECT(result == EINTR, FALSE)) result = 0;
#   endif

    if (EXPECT(0 == result, TRUE)) {
      LOCK();
      /* Here the pthread id may have been recycled.  Delete the thread */
      /* from GC_threads (unless it has been registered again from the  */
      /* client thread key destructor).                                 */
      if (KNOWN_FINISHED(t)) {
        GC_delete_thread(t);
      }
      UNLOCK();
    }

#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p join with thread %p %s\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()),
                    (void *)GC_PTHREAD_PTRVAL(thread),
                    result != 0 ? "failed" : "succeeded");
#   endif
    return result;
  }

  GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread)
  {
    int result;
    GC_thread t;

    INIT_REAL_SYMS();
    LOCK();
    t = (GC_thread)COVERT_DATAFLOW(GC_lookup_by_pthread(thread));
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (EXPECT(0 == result, TRUE)) {
      LOCK();
      /* Here the pthread id may have been recycled.    */
      if (KNOWN_FINISHED(t)) {
        GC_delete_thread(t);
      } else {
        t -> flags |= DETACHED;
      }
      UNLOCK();
    }
    return result;
  }

  struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    sem_t registered;           /* 1 ==> in our thread table, but       */
                                /* parent hasn't yet noticed.           */
    unsigned char flags;
  };

  /* Called from GC_pthread_start_inner().  Defined in this file to     */
  /* minimize the number of include files in pthread_start.c (because   */
  /* sem_t and sem_post() are not used in that file directly).          */
  GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread(
                                        void *(**pstart)(void *),
                                        void **pstart_arg,
                                        struct GC_stack_base *sb, void *arg)
  {
    struct start_info *psi = (struct start_info *)arg;
    thread_id_t self_id = thread_id_self();
    GC_thread me;

#   ifdef DEBUG_THREADS
      GC_log_printf("Starting thread %p, sp= %p\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()), (void *)&arg);
#   endif
    /* If a GC occurs before the thread is registered, that GC will     */
    /* ignore this thread.  That's fine, since it will block trying to  */
    /* acquire the allocation lock, and won't yet hold interesting      */
    /* pointers.                                                        */
    LOCK();
    /* We register the thread here instead of in the parent, so that    */
    /* we don't need to hold the allocation lock during pthread_create. */
    me = GC_register_my_thread_inner(sb, self_id);
    me -> flags = psi -> flags;
#   ifdef GC_WIN32_THREADS
      GC_win32_cache_self_pthread(self_id);
#   endif
#   ifdef THREAD_LOCAL_ALLOC
      GC_init_thread_local(&me->tlfs);
#   endif
    UNLOCK();

    *pstart = psi -> start_routine;
    *pstart_arg = psi -> arg;
#   ifdef DEBUG_THREADS
      GC_log_printf("start_routine= %p\n", (void *)(signed_word)(*pstart));
#   endif
    sem_post(&(psi -> registered));     /* Last action on *psi; */
                                        /* OK to deallocate.    */
    return me;
  }

  STATIC void * GC_pthread_start(void * arg)
  {
#   ifdef INCLUDE_LINUX_THREAD_DESCR
      struct GC_stack_base sb;

#     ifdef REDIRECT_MALLOC
        /* GC_get_stack_base may call pthread_getattr_np, which can     */
        /* unfortunately call realloc, which may allocate from an       */
        /* unregistered thread.  This is unpleasant, since it might     */
        /* force heap growth (or, even, heap overflow).                 */
        GC_disable();
#     endif
      if (GC_get_stack_base(&sb) != GC_SUCCESS)
        ABORT("Failed to get thread stack base");
#     ifdef REDIRECT_MALLOC
        GC_enable();
#     endif
      return GC_pthread_start_inner(&sb, arg);
#   else
      return GC_call_with_stack_base(GC_pthread_start_inner, arg);
#   endif
  }

  GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                       GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
                       void *(*start_routine)(void *), void *arg)
  {
    int result;
    struct start_info si;

    GC_ASSERT(I_DONT_HOLD_LOCK());
    INIT_REAL_SYMS();
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_thr_initialized);

    if (sem_init(&si.registered, GC_SEM_INIT_PSHARED, 0) != 0)
        ABORT("sem_init failed");
    si.flags = 0;
    si.start_routine = start_routine;
    si.arg = arg;

    /* We resist the temptation to muck with the stack size here,       */
    /* even if the default is unreasonably small.  That is the client's */
    /* responsibility.                                                  */
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size = 0;
        if (NULL != attr) {
          if (pthread_attr_getstacksize(attr, &stack_size) != 0)
            ABORT("pthread_attr_getstacksize failed");
        }
        if (0 == stack_size) {
          pthread_attr_t my_attr;

          if (pthread_attr_init(&my_attr) != 0)
            ABORT("pthread_attr_init failed");
          if (pthread_attr_getstacksize(&my_attr, &stack_size) != 0)
            ABORT("pthread_attr_getstacksize failed");
          (void)pthread_attr_destroy(&my_attr);
        }
        /* On Solaris 10 and on Win32 with winpthreads, with the        */
        /* default attr initialization, stack_size remains 0; fudge it. */
        if (EXPECT(0 == stack_size, FALSE)) {
#           if !defined(SOLARIS) && !defined(GC_WIN32_PTHREADS)
              WARN("Failed to get stack size for assertion checking\n", 0);
#           endif
            stack_size = 1000000;
        }
        GC_ASSERT(stack_size >= 65536);
        /* Our threads may need to do some work for the GC.     */
        /* Ridiculously small threads won't work, and they      */
        /* probably wouldn't work anyway.                       */
      }
#   endif

    if (attr != NULL) {
        int detachstate;

        if (pthread_attr_getdetachstate(attr, &detachstate) != 0)
            ABORT("pthread_attr_getdetachstate failed");
        if (PTHREAD_CREATE_DETACHED == detachstate)
          si.flags |= DETACHED;
    }

#   ifdef PARALLEL_MARK
      if (EXPECT(!GC_parallel && GC_available_markers_m1 > 0, FALSE))
        GC_start_mark_threads();
#   endif
#   ifdef DEBUG_THREADS
      GC_log_printf("About to start new thread from thread %p\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()));
#   endif
    set_need_to_lock();
    result = REAL_FUNC(pthread_create)(new_thread, attr,
                                       GC_pthread_start, &si);

    /* Wait until child has been added to the thread table.             */
    /* This also ensures that we hold onto the stack-allocated si       */
    /* until the child is done with it.                                 */
    if (EXPECT(0 == result, TRUE)) {
        IF_CANCEL(int cancel_state;)

        DISABLE_CANCEL(cancel_state);
                /* pthread_create is not a cancellation point.  */
        while (0 != sem_wait(&si.registered)) {
#           if defined(GC_HAIKU_THREADS)
              /* To work around a bug in Haiku semaphores.      */
              if (EACCES == errno) continue;
#           endif
            if (EINTR != errno) ABORT("sem_wait failed");
        }
        RESTORE_CANCEL(cancel_state);
    }
    sem_destroy(&si.registered);
    return result;
  }

#endif /* GC_PTHREADS && !SN_TARGET_ORBIS && !SN_TARGET_PSP2 */
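
/* Illustrative note (an editor's addition): clients normally do not    */
/* call the wrappers above directly; defining GC_THREADS before         */
/* including gc.h redirects pthread_create, pthread_join, etc. to       */
/* them, so new threads become known to the collector automatically.    */
#if 0
  #define GC_THREADS
  #include "gc.h"

  /* This actually invokes the collector's pthread_create wrapper. */
  pthread_create(&tid, NULL, start_routine, NULL);
#endif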

#if ((defined(GC_PTHREADS_PARAMARK) || defined(USE_PTHREAD_LOCKS)) \
     && !defined(NO_PTHREAD_TRYLOCK)) || defined(USE_SPIN_LOCK)
  /* Spend a few cycles in a way that can't introduce contention with   */
  /* other threads.                                                     */
# define GC_PAUSE_SPIN_CYCLES 10
  STATIC void GC_pause(void)
  {
    int i;

    for (i = 0; i < GC_PAUSE_SPIN_CYCLES; ++i) {
        /* Something that's unlikely to be optimized away. */
#     if defined(AO_HAVE_compiler_barrier) \
         && !defined(BASE_ATOMIC_OPS_EMULATED)
        AO_compiler_barrier();
#     else
        GC_noop1(i);
#     endif
    }
  }
#endif /* USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK */

#ifndef SPIN_MAX
# define SPIN_MAX 128   /* Maximum number of calls to GC_pause before   */
                        /* giving up.                                   */
#endif

#if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK) \
     && defined(USE_PTHREAD_LOCKS)) || defined(GC_PTHREADS_PARAMARK)
  /* If we do not want to use the below spinlock implementation, either */
  /* because we don't have a GC_test_and_set implementation, or because */
  /* we don't want to risk sleeping, we can still try spinning on       */
  /* pthread_mutex_trylock for a while.  This appears to be very        */
  /* beneficial in many cases.                                          */
  /* I suspect that under high contention this is nearly always better  */
  /* than the spin lock.  But it is a bit slower on a uniprocessor.     */
  /* Hence we still default to the spin lock.                           */
  /* This is also used to acquire the mark lock for the parallel        */
  /* marker.                                                            */

  /* Here we use a strict exponential backoff scheme.  I don't know     */
  /* whether that's better or worse than the above.  We eventually      */
  /* yield by calling pthread_mutex_lock(); it never makes sense to     */
  /* explicitly sleep.                                                  */

# ifdef LOCK_STATS
    /* Note that LOCK_STATS requires AO_HAVE_test_and_set.      */
    volatile AO_t GC_spin_count = 0;
    volatile AO_t GC_block_count = 0;
    volatile AO_t GC_unlocked_count = 0;
# endif

  STATIC void GC_generic_lock(pthread_mutex_t * lock)
  {
#   ifndef NO_PTHREAD_TRYLOCK
      unsigned pause_length = 1;
      unsigned i;

      if (EXPECT(0 == pthread_mutex_trylock(lock), TRUE)) {
#       ifdef LOCK_STATS
            (void)AO_fetch_and_add1(&GC_unlocked_count);
#       endif
        return;
      }
      for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch (pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    (void)AO_fetch_and_add1(&GC_spin_count);
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
      }
#   endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        (void)AO_fetch_and_add1(&GC_block_count);
#   endif
    pthread_mutex_lock(lock);
  }
#endif /* !USE_SPIN_LOCK || ... */

2562
#if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
2563
  GC_INNER volatile unsigned char GC_collecting = FALSE;
2564
                        /* A hint that we are in the collector and      */
2565
                        /* holding the allocation lock for an           */
2566
                        /* extended period.                             */
2567

2568
# if defined(AO_HAVE_char_load) && !defined(BASE_ATOMIC_OPS_EMULATED)
2569
#   define is_collecting() ((GC_bool)AO_char_load(&GC_collecting))
2570
# else
2571
    /* GC_collecting is a hint, a potential data race between   */
2572
    /* GC_lock() and ENTER/EXIT_GC() is OK to ignore.           */
2573
#   define is_collecting() ((GC_bool)GC_collecting)
2574
# endif
2575
#endif /* GC_PTHREADS && !GC_WIN32_THREADS */
2576

2577
#ifdef GC_ASSERTIONS
2578
  GC_INNER unsigned long GC_lock_holder = NO_THREAD;
2579
#endif
2580

2581
#if defined(USE_SPIN_LOCK)
2582
  /* Reasonably fast spin locks.  Basically the same implementation     */
2583
  /* as STL alloc.h.  This isn't really the right way to do this.       */
2584
  /* but until the POSIX scheduling mess gets straightened out ...      */
2585

2586
  GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
2587

2588
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor  */
2589
# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor   */
2590

2591
  static volatile AO_t spin_max = low_spin_max;
2592
  static volatile AO_t last_spins = 0;
2593
                                /* A potential data race between        */
2594
                                /* threads invoking GC_lock which reads */
2595
                                /* and updates spin_max and last_spins  */
2596
                                /* could be ignored because these       */
2597
                                /* variables are hints only.            */
2598

2599
  GC_INNER void GC_lock(void)
2600
  {
2601
    unsigned my_spin_max;
2602
    unsigned my_last_spins;
2603
    unsigned i;
2604

2605
    if (EXPECT(AO_test_and_set_acquire(&GC_allocate_lock)
2606
                == AO_TS_CLEAR, TRUE)) {
2607
        return;
2608
    }
2609
    my_spin_max = (unsigned)AO_load(&spin_max);
2610
    my_last_spins = (unsigned)AO_load(&last_spins);
2611
    for (i = 0; i < my_spin_max; i++) {
2612
        if (is_collecting() || GC_nprocs == 1)
2613
          goto yield;
2614
        if (i < my_last_spins/2) {
2615
            GC_pause();
2616
            continue;
2617
        }
2618
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
2619
            /*
2620
             * got it!
2621
             * Spinning worked.  Thus we're probably not being scheduled
2622
             * against the other process with which we were contending.
2623
             * Thus it makes sense to spin longer the next time.
2624
             */
2625
            AO_store(&last_spins, (AO_t)i);
2626
            AO_store(&spin_max, (AO_t)high_spin_max);
2627
            return;
2628
        }
2629
    }
2630
    /* We are probably being scheduled against the other process.  Sleep. */
2631
    AO_store(&spin_max, (AO_t)low_spin_max);
2632
  yield:
2633
    for (i = 0;; ++i) {
2634
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
2635
            return;
2636
        }
2637
#       define SLEEP_THRESHOLD 12
2638
                /* Under Linux very short sleeps tend to wait until     */
2639
                /* the current time quantum expires.  On old Linux      */
2640
                /* kernels nanosleep (<= 2 ms) just spins.              */
2641
                /* (Under 2.4, this happens only for real-time          */
2642
                /* processes.)  We want to minimize both behaviors      */
2643
                /* here.                                                */
2644
        if (i < SLEEP_THRESHOLD) {
2645
            sched_yield();
2646
        } else {
2647
            struct timespec ts;
2648

2649
            if (i > 24) i = 24;
2650
                        /* Don't wait for more than about 15 ms,        */
2651
                        /* even under extreme contention.               */
2652
            ts.tv_sec = 0;
2653
            ts.tv_nsec = 1 << i;
2654
            nanosleep(&ts, 0);
2655
        }
2656
    }
2657
  }
2658

2659
#elif defined(USE_PTHREAD_LOCKS)
2660
  GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
2661

2662
# ifndef NO_PTHREAD_TRYLOCK
2663
    GC_INNER void GC_lock(void)
86,355,591✔
2664
    {
2665
      if (1 == GC_nprocs || is_collecting()) {
86,355,591✔
2666
        pthread_mutex_lock(&GC_allocate_ml);
367,117✔
2667
      } else {
2668
        GC_generic_lock(&GC_allocate_ml);
85,988,474✔
2669
      }
2670
    }
86,487,657✔
2671
# elif defined(GC_ASSERTIONS)
2672
    GC_INNER void GC_lock(void)
2673
    {
2674
      pthread_mutex_lock(&GC_allocate_ml);
2675
    }
2676
# endif
2677

2678
#endif /* !USE_SPIN_LOCK && USE_PTHREAD_LOCKS */
2679

2680
#ifdef GC_PTHREADS_PARAMARK
2681

2682
# if defined(GC_ASSERTIONS) && defined(GC_WIN32_THREADS) \
2683
     && !defined(USE_PTHREAD_LOCKS)
2684
#   define NUMERIC_THREAD_ID(id) (unsigned long)(word)GC_PTHREAD_PTRVAL(id)
2685
    /* Id not guaranteed to be unique. */
2686
# endif
2687

2688
# ifdef GC_ASSERTIONS
2689
    STATIC unsigned long GC_mark_lock_holder = NO_THREAD;
2690
#   define SET_MARK_LOCK_HOLDER \
2691
                (void)(GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self()))
2692
#   define UNSET_MARK_LOCK_HOLDER \
2693
                do { \
2694
                  GC_ASSERT(GC_mark_lock_holder \
2695
                                == NUMERIC_THREAD_ID(pthread_self())); \
2696
                  GC_mark_lock_holder = NO_THREAD; \
2697
                } while (0)
2698
# else
2699
#   define SET_MARK_LOCK_HOLDER (void)0
2700
#   define UNSET_MARK_LOCK_HOLDER (void)0
2701
# endif /* !GC_ASSERTIONS */
2702

2703
  static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
2704

2705
# ifndef GC_WIN32_THREADS
2706
    static void setup_mark_lock(void)
32✔
2707
    {
2708
#     ifdef GLIBC_2_19_TSX_BUG
2709
        pthread_mutexattr_t mattr;
2710
        int glibc_minor = -1;
32✔
2711
        int glibc_major = GC_parse_version(&glibc_minor,
32✔
2712
                                           gnu_get_libc_version());
2713

2714
        if (glibc_major > 2 || (glibc_major == 2 && glibc_minor >= 19)) {
32✔
2715
          /* TODO: disable this workaround for glibc with fixed TSX */
2716
          /* This disables lock elision to workaround a bug in glibc 2.19+ */
2717
          if (0 != pthread_mutexattr_init(&mattr)) {
32✔
2718
            ABORT("pthread_mutexattr_init failed");
×
2719
          }
2720
          if (0 != pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_NORMAL)) {
32✔
2721
            ABORT("pthread_mutexattr_settype failed");
×
2722
          }
2723
          if (0 != pthread_mutex_init(&mark_mutex, &mattr)) {
32✔
2724
            ABORT("pthread_mutex_init failed");
×
2725
          }
2726
          (void)pthread_mutexattr_destroy(&mattr);
32✔
2727
        }
2728
#     endif
2729
    }
32✔
2730
# endif /* !GC_WIN32_THREADS */
2731

  GC_INNER void GC_acquire_mark_lock(void)
  {
#   if defined(NUMERIC_THREAD_ID_UNIQUE) && !defined(THREAD_SANITIZER)
      GC_ASSERT(GC_mark_lock_holder != NUMERIC_THREAD_ID(pthread_self()));
#   endif
    GC_generic_lock(&mark_mutex);
    SET_MARK_LOCK_HOLDER;
  }

  GC_INNER void GC_release_mark_lock(void)
  {
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
  }

  /* Collector must wait for free-list builders for 2 reasons:          */
  /* 1) Mark bits may still be getting examined without lock.           */
  /* 2) Partial free lists referenced only by locals may not be scanned */
  /*    correctly, e.g. if they contain "pointer-free" objects, since   */
  /*    the free-list link may be ignored.                              */
  STATIC void GC_wait_builder(void)
  {
    ASSERT_CANCEL_DISABLED();
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    SET_MARK_LOCK_HOLDER;
  }

  GC_INNER void GC_wait_for_reclaim(void)
  {
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
  }
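  /* Note the standard condition-variable idiom above: the predicate    */
  /* (GC_fl_builder_count > 0) is re-tested in a loop after each        */
  /* wake-up, since pthread_cond_wait() may return spuriously and the   */
  /* count may have become positive again before the mutex was          */
  /* re-acquired.                                                       */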

# if defined(CAN_HANDLE_FORK) && defined(THREAD_SANITIZER)
    /* Identical to GC_wait_for_reclaim() but with the no_sanitize      */
    /* attribute as a workaround for TSan, which does not notice that   */
    /* the GC lock is acquired in fork_prepare_proc().                  */
    GC_ATTR_NO_SANITIZE_THREAD
    static void wait_for_reclaim_atfork(void)
    {
      GC_acquire_mark_lock();
      while (GC_fl_builder_count > 0)
        GC_wait_builder();
      GC_release_mark_lock();
    }
# endif /* CAN_HANDLE_FORK && THREAD_SANITIZER */

  GC_INNER void GC_notify_all_builder(void)
  {
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
  }

  GC_INNER void GC_wait_marker(void)
  {
    ASSERT_CANCEL_DISABLED();
    GC_ASSERT(GC_parallel);
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    SET_MARK_LOCK_HOLDER;
  }

  GC_INNER void GC_notify_all_marker(void)
  {
    GC_ASSERT(GC_parallel);
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
  }
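
  /* mark_cv couples GC_wait_marker() and GC_notify_all_marker(): a     */
  /* marker thread atomically releases mark_mutex and blocks on the     */
  /* condition variable until some thread broadcasts; both sides        */
  /* assert GC_parallel since the variable only matters when parallel   */
  /* marking is enabled.                                                */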

#endif /* GC_PTHREADS_PARAMARK */

#ifdef PTHREAD_REGISTER_CANCEL_WEAK_STUBS
  /* Work around "undefined reference" linkage errors on some targets. */
  EXTERN_C_BEGIN
  extern void __pthread_register_cancel(void) __attribute__((__weak__));
  extern void __pthread_unregister_cancel(void) __attribute__((__weak__));
  EXTERN_C_END

  void __pthread_register_cancel(void) {}
  void __pthread_unregister_cancel(void) {}
#endif
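  /* Because the declarations above are weak, these empty definitions   */
  /* are overridden by the real libpthread symbols whenever the latter  */
  /* are present at link time; the stubs merely keep the references     */
  /* from going unresolved otherwise.                                   */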

#endif /* THREADS */