ivmai / bdwgc / build 1471 (push, via travis-ci-com)

06 Apr 2023 08:09PM UTC coverage: 76.73% (+0.3%) from 76.45%

Commit by ivmai: Travis CI: Do not make static+shared build with mingw32 on Ubuntu

Mingw32 (gcc-mingw-w64) on Ubuntu Jammy has a multiple-definition
issue with linking shared libraries if the exported symbols are
not attributed with dllexport.

7772 of 10129 relevant lines covered (76.73%)

8,608,212.39 hits per line


Source file: /pthread_support.c (82.75% of lines covered)
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2008 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/pthread_support.h"

/*
 * Support code originally for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code no doubt makes some assumptions beyond what is
 * guaranteed by the pthread standard, though it now does
 * very little of that.  It now also supports NPTL, and many
 * other Posix thread implementations.  We are trying to merge
 * all flavors of pthread support code into this file.
 */

#ifdef THREADS

#ifdef GC_PTHREADS
# include <errno.h>
# ifndef GC_WIN32_PTHREADS
#   include <unistd.h>
# endif
# if defined(GC_DARWIN_THREADS) \
     || (defined(GC_WIN32_THREADS) && defined(EMULATE_PTHREAD_SEMAPHORE))
#   include "private/darwin_semaphore.h"
# elif !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
#   include <semaphore.h>
# endif
#endif /* GC_PTHREADS */

#ifndef GC_WIN32_THREADS
# include <sched.h>
# include <time.h>
# if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
#   if !defined(GC_RTEMS_PTHREADS)
#     include <sys/mman.h>
#   endif
#   include <sys/time.h>
#   include <sys/types.h>
#   include <sys/stat.h>
#   include <fcntl.h>
# endif
# include <signal.h>
#endif /* !GC_WIN32_THREADS */

#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif

#if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
# include <sys/param.h>
# include <sys/sysctl.h>
#endif

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an uint in DG/UX */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */

#if defined(GC_PTHREADS) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
  /* Undefine macros used to redirect pthread primitives.       */
# undef pthread_create
# ifndef GC_NO_PTHREAD_SIGMASK
#   undef pthread_sigmask
# endif
# ifndef GC_NO_PTHREAD_CANCEL
#   undef pthread_cancel
# endif
# ifdef GC_HAVE_PTHREAD_EXIT
#   undef pthread_exit
# endif
# undef pthread_join
# undef pthread_detach
# if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
     && !defined(_PTHREAD_USE_PTDNAM_)
    /* Restore the original mangled names on Tru64 UNIX.        */
#   define pthread_create __pthread_create
#   define pthread_join   __pthread_join
#   define pthread_detach __pthread_detach
#   ifndef GC_NO_PTHREAD_CANCEL
#     define pthread_cancel __pthread_cancel
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
#     define pthread_exit __pthread_exit
#   endif
# endif /* GC_OSF1_THREADS */
#endif /* GC_PTHREADS */

#if !defined(GC_WIN32_THREADS) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
  /* TODO: Enable GC_USE_DLOPEN_WRAP for Cygwin? */

# ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
    int REAL_FUNC(pthread_create)(pthread_t *,
                                  GC_PTHREAD_CREATE_CONST pthread_attr_t *,
                                  void *(*start_routine)(void *), void *);
    int REAL_FUNC(pthread_join)(pthread_t, void **);
    int REAL_FUNC(pthread_detach)(pthread_t);
#   ifndef GC_NO_PTHREAD_SIGMASK
      int REAL_FUNC(pthread_sigmask)(int, const sigset_t *, sigset_t *);
#   endif
#   ifndef GC_NO_PTHREAD_CANCEL
      int REAL_FUNC(pthread_cancel)(pthread_t);
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
      void REAL_FUNC(pthread_exit)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
#   endif
# elif defined(GC_USE_DLOPEN_WRAP)
#   include <dlfcn.h>
#   define WRAP_FUNC(f) f
#   define REAL_FUNC(f) GC_real_##f
    /* We define both GC_f and plain f to be the wrapped function.  */
    /* In that way plain calls work, as do calls from files that    */
    /* included gc.h, which redefined f to GC_f.                    */
    /* FIXME: Needs work for DARWIN and Tru64 (OSF1) */
    typedef int (* GC_pthread_create_t)(pthread_t *,
                                GC_PTHREAD_CREATE_CONST pthread_attr_t *,
                                void *(*)(void *), void *);
    static GC_pthread_create_t REAL_FUNC(pthread_create);
#   ifndef GC_NO_PTHREAD_SIGMASK
      typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *, sigset_t *);
      static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
#   endif
    typedef int (* GC_pthread_join_t)(pthread_t, void **);
    static GC_pthread_join_t REAL_FUNC(pthread_join);
    typedef int (* GC_pthread_detach_t)(pthread_t);
    static GC_pthread_detach_t REAL_FUNC(pthread_detach);
#   ifndef GC_NO_PTHREAD_CANCEL
      typedef int (* GC_pthread_cancel_t)(pthread_t);
      static GC_pthread_cancel_t REAL_FUNC(pthread_cancel);
#   endif
#   ifdef GC_HAVE_PTHREAD_EXIT
      typedef void (* GC_pthread_exit_t)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
      static GC_pthread_exit_t REAL_FUNC(pthread_exit);
#   endif
# else
#   define WRAP_FUNC(f) GC_##f
#   ifdef GC_DGUX386_THREADS
#     define REAL_FUNC(f) __d10_##f
#   else
#     define REAL_FUNC(f) f
#   endif
# endif /* !GC_USE_LD_WRAP && !GC_USE_DLOPEN_WRAP */
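
  /* A note on the three interception strategies selected above:       */
  /* - with GC_USE_LD_WRAP the client is linked using GNU ld symbol    */
  /*   wrapping (typically -Wl,--wrap=pthread_create and so on for     */
  /*   each redirected primitive), so WRAP_FUNC expands to __wrap_f    */
  /*   and REAL_FUNC to __real_f;                                      */
  /* - with GC_USE_DLOPEN_WRAP plain f is defined in this file and     */
  /*   the real f is looked up at run time (see GC_init_real_syms);    */
  /* - otherwise the client calls GC_f directly (gc.h redirects f      */
  /*   to GC_f).                                                       */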

# if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
    /* Define GC_ functions as aliases for the plain ones, which will   */
    /* be intercepted.  This allows files which include gc.h, and hence */
    /* generate references to the GC_ symbols, to see the right ones.   */
    GC_API int GC_pthread_create(pthread_t *t,
                                 GC_PTHREAD_CREATE_CONST pthread_attr_t *a,
                                 void * (*fn)(void *), void *arg)
    {
      return pthread_create(t, a, fn, arg);
    }

#   ifndef GC_NO_PTHREAD_SIGMASK
      GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
                                    sigset_t *old)
      {
        return pthread_sigmask(how, mask, old);
      }
#   endif /* !GC_NO_PTHREAD_SIGMASK */

    GC_API int GC_pthread_join(pthread_t t, void **res)
    {
      return pthread_join(t, res);
    }

    GC_API int GC_pthread_detach(pthread_t t)
    {
      return pthread_detach(t);
    }

#   ifndef GC_NO_PTHREAD_CANCEL
      GC_API int GC_pthread_cancel(pthread_t t)
      {
        return pthread_cancel(t);
      }
#   endif /* !GC_NO_PTHREAD_CANCEL */

#   ifdef GC_HAVE_PTHREAD_EXIT
      GC_API GC_PTHREAD_EXIT_ATTRIBUTE void GC_pthread_exit(void *retval)
      {
        pthread_exit(retval);
      }
#   endif
# endif /* GC_USE_LD_WRAP || GC_USE_DLOPEN_WRAP */

# ifdef GC_USE_DLOPEN_WRAP
    STATIC GC_bool GC_syms_initialized = FALSE;

    STATIC void GC_init_real_syms(void)
    {
      void *dl_handle;

      GC_ASSERT(!GC_syms_initialized);
#     ifdef RTLD_NEXT
        dl_handle = RTLD_NEXT;
#     else
        dl_handle = dlopen("libpthread.so.0", RTLD_LAZY);
        if (NULL == dl_handle) {
          dl_handle = dlopen("libpthread.so", RTLD_LAZY); /* without ".0" */
          if (NULL == dl_handle) ABORT("Couldn't open libpthread");
        }
#     endif
      REAL_FUNC(pthread_create) = (GC_pthread_create_t)(word)
                                dlsym(dl_handle, "pthread_create");
#     ifdef RTLD_NEXT
        if (REAL_FUNC(pthread_create) == 0)
          ABORT("pthread_create not found"
                " (probably -lgc is specified after -lpthread)");
#     endif
#     ifndef GC_NO_PTHREAD_SIGMASK
        REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)(word)
                                dlsym(dl_handle, "pthread_sigmask");
#     endif
      REAL_FUNC(pthread_join) = (GC_pthread_join_t)(word)
                                dlsym(dl_handle, "pthread_join");
      REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)(word)
                                dlsym(dl_handle, "pthread_detach");
#     ifndef GC_NO_PTHREAD_CANCEL
        REAL_FUNC(pthread_cancel) = (GC_pthread_cancel_t)(word)
                                dlsym(dl_handle, "pthread_cancel");
#     endif
#     ifdef GC_HAVE_PTHREAD_EXIT
        REAL_FUNC(pthread_exit) = (GC_pthread_exit_t)(word)
                                dlsym(dl_handle, "pthread_exit");
#     endif
      GC_syms_initialized = TRUE;
    }

#   define INIT_REAL_SYMS() if (EXPECT(GC_syms_initialized, TRUE)) {} \
                            else GC_init_real_syms()
# else
#   define INIT_REAL_SYMS() (void)0
# endif /* !GC_USE_DLOPEN_WRAP */
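
  /* Note: INIT_REAL_SYMS() must be invoked before the first use of    */
  /* any REAL_FUNC(...) entry point; GC_start_mark_threads_inner()     */
  /* below, for instance, calls it prior to using pthread_create.      */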

#else
# define WRAP_FUNC(f) GC_##f
# define REAL_FUNC(f) f
# define INIT_REAL_SYMS() (void)0
#endif /* GC_WIN32_THREADS */

#ifndef GC_ALWAYS_MULTITHREADED
  GC_INNER GC_bool GC_need_to_lock = FALSE;
#endif

#ifdef THREAD_LOCAL_ALLOC
  /* We must explicitly mark ptrfree and gcj free lists, since the free */
  /* list links wouldn't otherwise be found.  We also set them in the   */
  /* normal free lists, since that involves touching less memory than   */
  /* if we scanned them normally.                                       */
  GC_INNER void GC_mark_thread_local_free_lists(void)
  {
    int i;
    GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
        if (!KNOWN_FINISHED(p))
          GC_mark_thread_local_fls_for(&p->tlfs);
      }
    }
  }

# if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists are completely marked.    */
    /* Also check that thread-specific-data structures are marked.      */
    void GC_check_tls(void)
    {
        int i;
        GC_thread p;

        for (i = 0; i < THREAD_TABLE_SZ; ++i) {
          for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
            if (!KNOWN_FINISHED(p))
              GC_check_tls_for(&p->tlfs);
          }
        }
#       if defined(USE_CUSTOM_SPECIFIC)
          if (GC_thread_key != 0)
            GC_check_tsd_marks(GC_thread_key);
#       endif
    }
# endif /* GC_ASSERTIONS */
#endif /* THREAD_LOCAL_ALLOC */

#ifdef GC_WIN32_THREADS
  /* A macro for functions and variables which should be accessible     */
  /* from win32_threads.c but otherwise could be static.                */
# define GC_INNER_WIN32THREAD GC_INNER
#else
# define GC_INNER_WIN32THREAD STATIC
#endif

#ifdef PARALLEL_MARK

# if defined(GC_WIN32_THREADS) || defined(USE_PROC_FOR_LIBRARIES) \
     || (defined(IA64) && (defined(HAVE_PTHREAD_ATTR_GET_NP) \
                           || defined(HAVE_PTHREAD_GETATTR_NP)))
    GC_INNER_WIN32THREAD ptr_t GC_marker_sp[MAX_MARKERS - 1] = {0};
                                        /* The cold end of the stack    */
                                        /* for markers.                 */
# endif /* GC_WIN32_THREADS || USE_PROC_FOR_LIBRARIES */

# if defined(IA64) && defined(USE_PROC_FOR_LIBRARIES)
    static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
# endif

# if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
    static mach_port_t marker_mach_threads[MAX_MARKERS - 1] = {0};

    /* Used only by GC_suspend_thread_list().   */
    GC_INNER GC_bool GC_is_mach_marker(thread_act_t thread)
    {
      int i;
      for (i = 0; i < GC_markers_m1; i++) {
        if (marker_mach_threads[i] == thread)
          return TRUE;
      }
      return FALSE;
    }
# endif /* GC_DARWIN_THREADS */

# ifdef HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG /* NetBSD */
    static void set_marker_thread_name(unsigned id)
    {
      int err = pthread_setname_np(pthread_self(), "GC-marker-%zu",
                                   (void*)(size_t)id);
      if (EXPECT(err != 0, FALSE))
        WARN("pthread_setname_np failed, errno= %" WARN_PRIdPTR "\n",
             (signed_word)err);
    }
# elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) \
       || defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
    static void set_marker_thread_name(unsigned id)
    {
      char name_buf[16]; /* pthread_setname_np may fail for longer names */
      int len = sizeof("GC-marker-") - 1;

      /* Compose the name manually as snprintf may be unavailable or    */
      /* "%u directive output may be truncated" warning may occur.      */
      BCOPY("GC-marker-", name_buf, len);
      if (id >= 10)
        name_buf[len++] = (char)('0' + (id / 10) % 10);
      name_buf[len] = (char)('0' + id % 10);
      name_buf[len + 1] = '\0';

#     ifdef HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID /* iOS, OS X */
        (void)pthread_setname_np(name_buf);
#     else /* Linux, Solaris, etc. */
        if (EXPECT(pthread_setname_np(pthread_self(), name_buf) != 0, FALSE))
          WARN("pthread_setname_np failed\n", 0);
#     endif
    }
# elif defined(GC_WIN32_THREADS) && !defined(MSWINCE)
    /* A pointer to SetThreadDescription() which is available since     */
    /* Windows 10.  The function prototype is in processthreadsapi.h.   */
    static FARPROC setThreadDescription_fn;

    GC_INNER void GC_init_win32_thread_naming(HMODULE hK32)
    {
      if (hK32)
        setThreadDescription_fn = GetProcAddress(hK32, "SetThreadDescription");
    }

    static void set_marker_thread_name(unsigned id)
    {
      WCHAR name_buf[16];
      int len = sizeof(L"GC-marker-") / sizeof(WCHAR) - 1;
      HRESULT hr;

      if (!setThreadDescription_fn) return; /* missing SetThreadDescription */

      /* Compose the name manually as swprintf may be unavailable.      */
      BCOPY(L"GC-marker-", name_buf, len * sizeof(WCHAR));
      if (id >= 10)
        name_buf[len++] = (WCHAR)('0' + (id / 10) % 10);
      name_buf[len] = (WCHAR)('0' + id % 10);
      name_buf[len + 1] = 0;

      /* Invoke SetThreadDescription().  Cast the function pointer to word  */
      /* first to avoid "incompatible function types" compiler warning.     */
      hr = (*(HRESULT (WINAPI *)(HANDLE, const WCHAR *))
            (word)setThreadDescription_fn)(GetCurrentThread(), name_buf);
      if (hr < 0)
        WARN("SetThreadDescription failed\n", 0);
    }
# else
#   define set_marker_thread_name(id) (void)(id)
# endif
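
  /* Note: the manual digit composition in the variants above emits    */
  /* at most two decimal digits, i.e. marker ids below 100; the        */
  /* MAX_MARKERS limit is presumed to keep id within that range.       */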

  GC_INNER_WIN32THREAD
# ifdef GC_PTHREADS_PARAMARK
    void *GC_mark_thread(void *id)
# elif defined(MSWINCE)
    DWORD WINAPI GC_mark_thread(LPVOID id)
# else
    unsigned __stdcall GC_mark_thread(void *id)
# endif
  {
    word my_mark_no = 0;
    IF_CANCEL(int cancel_state;)

    if ((word)id == GC_WORD_MAX) return 0; /* to prevent a compiler warning */
    DISABLE_CANCEL(cancel_state);
                         /* Mark threads are not cancellable; they      */
                         /* should be invisible to the client.          */
    set_marker_thread_name((unsigned)(word)id);
#   if defined(GC_WIN32_THREADS) || defined(USE_PROC_FOR_LIBRARIES) \
       || (defined(IA64) && (defined(HAVE_PTHREAD_ATTR_GET_NP) \
                             || defined(HAVE_PTHREAD_GETATTR_NP)))
      GC_marker_sp[(word)id] = GC_approx_sp();
#   endif
#   if defined(IA64) && defined(USE_PROC_FOR_LIBRARIES)
      marker_bsp[(word)id] = GC_save_regs_in_stack();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
      marker_mach_threads[(word)id] = mach_thread_self();
#   endif
#   if !defined(GC_PTHREADS_PARAMARK)
      GC_marker_Id[(word)id] = thread_id_self();
#   endif

    /* Inform GC_start_mark_threads about completion of marker data init. */
    GC_acquire_mark_lock();
    if (0 == --GC_fl_builder_count) /* count may have a negative value */
      GC_notify_all_builder();

    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This is important if it were called from the signal   */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the      */
    /* argument is unnecessary.                                         */
    for (;; ++my_mark_no) {
      if (my_mark_no - GC_mark_no > (word)2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no     */
        /* wrapped.                                                     */
        my_mark_no = GC_mark_no;
      }
#     ifdef DEBUG_THREADS
        GC_log_printf("Starting helper for mark number %lu (thread %u)\n",
                      (unsigned long)my_mark_no, (unsigned)(word)id);
#     endif
      GC_help_marker(my_mark_no);
    }
  }

  GC_INNER_WIN32THREAD int GC_available_markers_m1 = 0;

#endif /* PARALLEL_MARK */

#ifdef GC_PTHREADS_PARAMARK

# ifdef GLIBC_2_1_MUTEX_HACK
    /* Ugly workaround for a linux threads bug in the final versions    */
    /* of glibc 2.1.  Pthread_mutex_trylock sets the mutex owner        */
    /* field even when it fails to acquire the mutex.  This causes      */
    /* pthread_cond_wait to die.  Should not be needed for glibc 2.2.   */
    /* According to the man page, we should use                         */
    /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
    /* defined.                                                         */
    static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
# else
    static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
# endif

# ifdef CAN_HANDLE_FORK
    static pthread_cond_t mark_cv;
                        /* initialized by GC_start_mark_threads_inner   */
# else
    static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
# endif

  GC_INNER void GC_start_mark_threads_inner(void)
  {
    int i;
    pthread_attr_t attr;
#   ifndef NO_MARKER_SPECIAL_SIGMASK
      sigset_t set, oldset;
#   endif

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_available_markers_m1 <= 0 || GC_parallel) return;
                /* Skip if parallel markers disabled or already started. */
    GC_wait_for_gc_completion(TRUE);

#   ifdef CAN_HANDLE_FORK
      /* Initialize mark_cv (for the first time), or cleanup its value  */
      /* after forking in the child process.  All the marker threads in */
      /* the parent process were blocked on this variable at fork, so   */
      /* pthread_cond_wait() malfunction (hang) is possible in the      */
      /* child process without such a cleanup.                          */
      /* TODO: This is not portable, it is better to shortly unblock    */
      /* all marker threads in the parent process at fork.              */
      {
        pthread_cond_t mark_cv_local = PTHREAD_COND_INITIALIZER;
        BCOPY(&mark_cv_local, &mark_cv, sizeof(mark_cv));
      }
#   endif

    GC_ASSERT(GC_fl_builder_count == 0);
    INIT_REAL_SYMS(); /* for pthread_create */
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   ifdef DEFAULT_STACK_MAYBE_SMALL
      /* Default stack size is usually too small: increase it.  */
      /* Otherwise marker threads or GC may run out of space.   */
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed");
        if (old_size < MIN_STACK_SIZE
            && old_size != 0 /* stack size is known */) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed");
        }
      }
#   endif /* DEFAULT_STACK_MAYBE_SMALL */

#   ifndef NO_MARKER_SPECIAL_SIGMASK
      /* Apply special signal mask to GC marker threads, and don't drop */
      /* user defined signals by GC marker threads.                     */
      if (sigfillset(&set) != 0)
        ABORT("sigfillset failed");

#     ifdef SIGNAL_BASED_STOP_WORLD
        /* These are used by GC to stop and restart the world.  */
        if (sigdelset(&set, GC_get_suspend_signal()) != 0
            || sigdelset(&set, GC_get_thr_restart_signal()) != 0)
          ABORT("sigdelset failed");
#     endif

      if (EXPECT(REAL_FUNC(pthread_sigmask)(SIG_BLOCK,
                                            &set, &oldset) < 0, FALSE)) {
        WARN("pthread_sigmask set failed, no markers started\n", 0);
        GC_markers_m1 = 0;
        (void)pthread_attr_destroy(&attr);
        return;
      }
#   endif /* !NO_MARKER_SPECIAL_SIGMASK */

    /* To have proper GC_parallel value in GC_help_marker.      */
    GC_markers_m1 = GC_available_markers_m1;

    for (i = 0; i < GC_available_markers_m1; ++i) {
      pthread_t new_thread;

#     ifdef GC_WIN32_THREADS
        GC_marker_last_stack_min[i] = ADDR_LIMIT;
#     endif
      if (EXPECT(REAL_FUNC(pthread_create)(&new_thread, &attr, GC_mark_thread,
                                           (void *)(word)i) != 0, FALSE)) {
        WARN("Marker thread %" WARN_PRIdPTR " creation failed\n",
             (signed_word)i);
        /* Don't try to create other marker threads.    */
        GC_markers_m1 = i;
        break;
      }
    }

#   ifndef NO_MARKER_SPECIAL_SIGMASK
      /* Restore previous signal mask.  */
      if (EXPECT(REAL_FUNC(pthread_sigmask)(SIG_SETMASK,
                                            &oldset, NULL) < 0, FALSE)) {
        WARN("pthread_sigmask restore failed\n", 0);
      }
#   endif

    (void)pthread_attr_destroy(&attr);
    GC_wait_for_markers_init();
    GC_COND_LOG_PRINTF("Started %d mark helper threads\n", GC_markers_m1);
  }

#endif /* GC_PTHREADS_PARAMARK */

/* A hash table to keep information about the registered threads.       */
/* Not used if GC_win32_dll_threads is set.                             */
GC_INNER GC_thread GC_threads[THREAD_TABLE_SZ] = {0};
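/* The table is indexed by THREAD_TABLE_INDEX(id); collisions are       */
/* resolved by chaining through the tm.next field (see GC_new_thread    */
/* and GC_lookup_thread below).                                         */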

/* It may not be safe to allocate when we register the first thread.    */
/* Note that next and status fields are unused, but there might be some */
/* other fields (crtn and backing_store_end) to be pushed.              */
static struct GC_StackContext_Rep first_crtn;
static struct GC_Thread_Rep first_thread;

/* A place to retain a pointer to an allocated object while a thread    */
/* registration is ongoing.  Protected by the GC lock.                  */
static GC_stack_context_t saved_crtn = NULL;

#ifdef GC_ASSERTIONS
  GC_INNER GC_bool GC_thr_initialized = FALSE;
#endif

void GC_push_thread_structures(void)
{
  GC_ASSERT(I_HOLD_LOCK());
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads) {
      /* Unlike the other threads implementations, the thread table     */
      /* here contains no pointers to the collectible heap (note also   */
      /* that GC_PTHREADS is incompatible with DllMain-based thread     */
      /* registration).  Thus we have no private structures we need     */
      /* to preserve.                                                   */
    } else
# endif
  /* else */ {
    GC_PUSH_ALL_SYM(GC_threads);
#   ifdef E2K
      GC_PUSH_ALL_SYM(first_crtn.backing_store_end);
#   endif
    GC_ASSERT(NULL == first_thread.tm.next);
#   ifdef GC_PTHREADS
      GC_ASSERT(NULL == first_thread.status);
#   endif
    GC_PUSH_ALL_SYM(first_thread.crtn);
    GC_PUSH_ALL_SYM(saved_crtn);
  }
# if defined(THREAD_LOCAL_ALLOC) && defined(USE_CUSTOM_SPECIFIC)
    GC_PUSH_ALL_SYM(GC_thread_key);
# endif
}

#if defined(MPROTECT_VDB) && defined(GC_WIN32_THREADS)
  GC_INNER void GC_win32_unprotect_thread(GC_thread t)
  {
    if (!GC_win32_dll_threads && GC_auto_incremental) {
      GC_stack_context_t crtn = t -> crtn;

      if (crtn != &first_crtn) {
        GC_ASSERT(SMALL_OBJ(GC_size(crtn)));
        GC_remove_protection(HBLKPTR(crtn), 1, FALSE);
      }
      if (t != &first_thread) {
        GC_ASSERT(SMALL_OBJ(GC_size(t)));
        GC_remove_protection(HBLKPTR(t), 1, FALSE);
      }
    }
  }
#endif /* MPROTECT_VDB && GC_WIN32_THREADS */

#ifdef DEBUG_THREADS
  STATIC int GC_count_threads(void)
  {
    int i;
    int count = 0;

#   if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
      if (GC_win32_dll_threads) return -1; /* not implemented */
#   endif
    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
        GC_thread p;

        for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
            if (!KNOWN_FINISHED(p))
                ++count;
        }
    }
    return count;
  }
#endif /* DEBUG_THREADS */

/* Add a thread to GC_threads.  We assume it wasn't already there.      */
/* The id field is set by the caller.                                   */
GC_INNER_WIN32THREAD GC_thread GC_new_thread(thread_id_t self_id)
{
    int hv = THREAD_TABLE_INDEX(self_id);
    GC_thread result;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef DEBUG_THREADS
        GC_log_printf("Creating thread %p\n", (void *)(signed_word)self_id);
        for (result = GC_threads[hv];
             result != NULL; result = result -> tm.next)
          if (!THREAD_ID_EQUAL(result -> id, self_id)) {
            GC_log_printf("Hash collision at GC_threads[%d]\n", hv);
            break;
          }
#   endif
    if (EXPECT(NULL == first_thread.crtn, FALSE)) {
        result = &first_thread;
        first_thread.crtn = &first_crtn;
        GC_ASSERT(NULL == GC_threads[hv]);
#       ifdef CPPCHECK
          GC_noop1((unsigned char)first_thread.flags_pad[0]);
#         if defined(THREAD_SANITIZER) && defined(SIGNAL_BASED_STOP_WORLD)
            GC_noop1((unsigned char)first_crtn.dummy[0]);
#         endif
#         ifndef GC_NO_FINALIZATION
            GC_noop1((unsigned char)first_crtn.fnlz_pad[0]);
#         endif
#       endif
    } else {
        GC_stack_context_t crtn;

        GC_ASSERT(!GC_win32_dll_threads);
        GC_ASSERT(!GC_in_thread_creation);
        GC_in_thread_creation = TRUE; /* OK to collect from unknown thread */
        crtn = (GC_stack_context_t)GC_INTERNAL_MALLOC(
                        sizeof(struct GC_StackContext_Rep), NORMAL);

        /* The current stack is not scanned until the thread is         */
        /* registered, thus crtn pointer is to be retained in the       */
        /* global data roots for a while (and pushed explicitly if      */
        /* a collection occurs here).                                   */
        GC_ASSERT(NULL == saved_crtn);
        saved_crtn = crtn;
        result = (GC_thread)GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep),
                                               NORMAL);
        saved_crtn = NULL; /* no more collections till thread is registered */
        GC_in_thread_creation = FALSE;
        if (NULL == crtn || NULL == result)
          ABORT("Failed to allocate memory for thread registering");
        result -> crtn = crtn;
    }
    /* The id field is not set here. */
#   ifdef USE_TKILL_ON_ANDROID
      result -> kernel_id = gettid();
#   endif
    result -> tm.next = GC_threads[hv];
    GC_threads[hv] = result;
#   ifdef NACL
      GC_nacl_initialize_gc_thread(result);
#   endif
    GC_ASSERT(0 == result -> flags);
    if (EXPECT(result != &first_thread, TRUE))
      GC_dirty(result);
    return result;
}

/* Delete a thread from GC_threads.  We assume it is there.  (The code  */
/* intentionally traps if it was not.)  It is also safe to delete the   */
/* main thread.  If GC_win32_dll_threads is set, it should be called    */
/* only from the thread being deleted.  If a thread has been joined,    */
/* but we have not yet been notified, then there may be more than one   */
/* thread in the table with the same thread id - this is OK because we  */
/* delete a specific one.                                               */
GC_INNER_WIN32THREAD void GC_delete_thread(GC_thread t)
{
# if defined(GC_WIN32_THREADS) && !defined(MSWINCE)
    CloseHandle(t -> handle);
# endif
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads) {
      /* This is intended to be lock-free.  It is either called         */
      /* synchronously from the thread being deleted, or by the joining */
      /* thread.  In this branch asynchronous changes to (*t) are       */
      /* possible.  Note that it is not allowed to call GC_printf (and  */
      /* the friends) here, see GC_stop_world() in win32_threads.c for  */
      /* the information.                                               */
      t -> crtn -> stack_end = NULL;
      t -> id = 0;
      t -> flags = 0; /* !IS_SUSPENDED */
#     ifdef RETRY_GET_THREAD_CONTEXT
        t -> context_sp = NULL;
#     endif
      AO_store_release(&(t -> tm.in_use), FALSE);
    } else
# endif
  /* else */ {
    thread_id_t id = t -> id;
    int hv = THREAD_TABLE_INDEX(id);
    GC_thread p;
    GC_thread prev = NULL;

    GC_ASSERT(I_HOLD_LOCK());
#   if defined(DEBUG_THREADS) && !defined(MSWINCE) \
       && (!defined(MSWIN32) || defined(CONSOLE_LOG))
      GC_log_printf("Deleting thread %p, n_threads= %d\n",
                    (void *)(signed_word)id, GC_count_threads());
#   endif
    for (p = GC_threads[hv]; p != t; p = p -> tm.next) {
      prev = p;
    }
    if (NULL == prev) {
        GC_threads[hv] = p -> tm.next;
    } else {
        GC_ASSERT(prev != &first_thread);
        prev -> tm.next = p -> tm.next;
        GC_dirty(prev);
    }
    if (EXPECT(p != &first_thread, TRUE)) {
#     ifdef GC_DARWIN_THREADS
        mach_port_deallocate(mach_task_self(), p -> mach_thread);
#     endif
      GC_ASSERT(p -> crtn != &first_crtn);
      GC_INTERNAL_FREE(p -> crtn);
      GC_INTERNAL_FREE(p);
    }
  }
}

/* Return a GC_thread corresponding to a given thread id, or    */
/* NULL if it is not there.                                     */
/* Caller holds allocation lock or otherwise inhibits updates.  */
/* If there is more than one thread with the given id we        */
/* return the most recent one.                                  */
GC_INNER GC_thread GC_lookup_thread(thread_id_t id)
{
  GC_thread p;

# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads)
      return GC_win32_dll_lookup_thread(id);
# endif
  for (p = GC_threads[THREAD_TABLE_INDEX(id)];
       p != NULL; p = p -> tm.next) {
    if (EXPECT(THREAD_ID_EQUAL(p -> id, id), TRUE)) break;
  }
  return p;
}

/* Same as GC_self_thread_inner() but acquires the GC lock.     */
STATIC GC_thread GC_self_thread(void) {
  GC_thread p;

  LOCK();
  p = GC_self_thread_inner();
  UNLOCK();
  return p;
}

#ifndef GC_NO_FINALIZATION
  /* Called by GC_finalize() (in case of an allocation failure observed). */
  GC_INNER void GC_reset_finalizer_nested(void)
  {
    GC_ASSERT(I_HOLD_LOCK());
    GC_self_thread_inner() -> crtn -> finalizer_nested = 0;
  }

  /* Checks and updates the thread-local level of finalizers recursion. */
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
  /* collector (to minimize the risk of a deep finalizers recursion),   */
  /* otherwise returns a pointer to the thread-local finalizer_nested.  */
  /* Called by GC_notify_or_invoke_finalizers() only.                   */
  GC_INNER unsigned char *GC_check_finalizer_nested(void)
  {
    GC_stack_context_t crtn;
    unsigned nesting_level;

    GC_ASSERT(I_HOLD_LOCK());
    crtn = GC_self_thread_inner() -> crtn;
    nesting_level = crtn -> finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
      if (++(crtn -> finalizer_skipped) < (1U << nesting_level))
        return NULL;
      crtn -> finalizer_skipped = 0;
    }
    crtn -> finalizer_nested = (unsigned char)(nesting_level + 1);
    return &(crtn -> finalizer_nested);
  }
#endif /* !GC_NO_FINALIZATION */

#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
  /* This is called from thread-local GC_malloc(). */
  GC_bool GC_is_thread_tsd_valid(void *tsd)
  {
    GC_thread me = GC_self_thread();

    return (word)tsd >= (word)(&me->tlfs)
            && (word)tsd < (word)(&me->tlfs) + sizeof(me->tlfs);
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */

GC_API int GC_CALL GC_thread_is_registered(void)
{
  /* TODO: Use GC_get_tlfs() instead. */
  GC_thread me = GC_self_thread();

  return me != NULL && !KNOWN_FINISHED(me);
}

GC_API void GC_CALL GC_register_altstack(void *normstack,
                GC_word normstack_size, void *altstack, GC_word altstack_size)
{
#ifdef GC_WIN32_THREADS
  /* TODO: Implement */
  UNUSED_ARG(normstack);
  UNUSED_ARG(normstack_size);
  UNUSED_ARG(altstack);
  UNUSED_ARG(altstack_size);
#else
  GC_thread me;
  GC_stack_context_t crtn;

  LOCK();
  me = GC_self_thread_inner();
  if (EXPECT(NULL == me, FALSE)) {
    /* We are called before GC_thr_init. */
    me = &first_thread;
  }
  crtn = me -> crtn;
  crtn -> normstack = (ptr_t)normstack;
  crtn -> normstack_size = normstack_size;
  crtn -> altstack = (ptr_t)altstack;
  crtn -> altstack_size = altstack_size;
  UNLOCK();
#endif
}

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
  {
    int i;
    GC_thread p;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers_m1; ++i) {
        if ((word)GC_marker_sp[i] > (word)lo
            && (word)GC_marker_sp[i] < (word)hi)
          return TRUE;
#       ifdef IA64
          if ((word)marker_bsp[i] > (word)lo
              && (word)marker_bsp[i] < (word)hi)
            return TRUE;
#       endif
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
        GC_stack_context_t crtn = p -> crtn;

        if (crtn -> stack_end != NULL) {
#         ifdef STACK_GROWS_UP
            if ((word)crtn -> stack_end >= (word)lo
                && (word)crtn -> stack_end < (word)hi)
              return TRUE;
#         else /* STACK_GROWS_DOWN */
            if ((word)crtn -> stack_end > (word)lo
                && (word)crtn -> stack_end <= (word)hi)
              return TRUE;
#         endif
        }
      }
    }
    return FALSE;
  }
#endif /* USE_PROC_FOR_LIBRARIES */

#if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
    && defined(IA64)
  /* Find the largest stack base smaller than bound.  May be used       */
  /* to find the boundary between a register stack and adjacent         */
  /* immediately preceding memory stack.                                */
  GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
  {
    int i;
    GC_thread p;
    ptr_t result = 0;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers_m1; ++i) {
        if ((word)GC_marker_sp[i] > (word)result
            && (word)GC_marker_sp[i] < (word)bound)
          result = GC_marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
        GC_stack_context_t crtn = p -> crtn;

        if ((word)(crtn -> stack_end) > (word)result
            && (word)(crtn -> stack_end) < (word)bound) {
          result = crtn -> stack_end;
        }
      }
    }
    return result;
  }
#endif /* IA64 */

#ifndef STAT_READ
# define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call    */
        /* the real one.                                                */
#endif

#ifdef GC_HPUX_THREADS
# define GC_get_nprocs() pthread_num_processors_np()

#elif defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
      || defined(GC_HAIKU_THREADS) || defined(GC_SOLARIS_THREADS) \
      || defined(HURD) || defined(HOST_ANDROID) || defined(NACL)
  GC_INLINE int GC_get_nprocs(void)
  {
    int nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
    return nprocs > 0 ? nprocs : 1; /* ignore error silently */
  }

#elif defined(GC_IRIX_THREADS)
  GC_INLINE int GC_get_nprocs(void)
  {
    int nprocs = (int)sysconf(_SC_NPROC_ONLN);
    return nprocs > 0 ? nprocs : 1; /* ignore error silently */
  }

#elif defined(GC_LINUX_THREADS) /* && !HOST_ANDROID && !NACL */
  /* Return the number of processors. */
  STATIC int GC_get_nprocs(void)
  {
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that     */
    /* appears to be buggy in many cases.                             */
    /* We look for lines "cpu<n>" in /proc/stat.                      */
#   define PROC_STAT_BUF_SZ ((1 + MAX_MARKERS) * 100) /* should be enough */
    /* No need to read the entire /proc/stat to get maximum cpu<N> as   */
    /* - the requested lines are located at the beginning of the file;  */
    /* - the lines with cpu<N> where N > MAX_MARKERS are not needed.    */
    char stat_buf[PROC_STAT_BUF_SZ+1];
    int f;
    int result, i, len;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0) {
      WARN("Could not open /proc/stat\n", 0);
      return 1; /* assume a uniprocessor */
    }
    len = STAT_READ(f, stat_buf, sizeof(stat_buf)-1);
    /* Unlikely that we need to retry because of an incomplete read here. */
    if (len < 0) {
      WARN("Failed to read /proc/stat, errno= %" WARN_PRIdPTR "\n",
           (signed_word)errno);
      close(f);
      return 1;
    }
    stat_buf[len] = '\0'; /* to avoid potential buffer overrun by atoi() */
    close(f);

    result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."   */
        /* entry in /proc/stat.  We identify those as           */
        /* uniprocessors.                                       */

    for (i = 0; i < len - 4; ++i) {
      if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
          && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
        int cpu_no = atoi(&stat_buf[i + 4]);
        if (cpu_no >= result)
          result = cpu_no + 1;
      }
    }
    return result;
  }
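  /* For example, if /proc/stat starts with the lines "cpu ...",       */
  /* "cpu0 ..." and "cpu1 ...", the scan above matches cpu0 and cpu1   */
  /* (only lines preceded by '\n' are considered), so the maximum      */
  /* cpu<N> is 1 and the result is 1 + 1 = 2.                          */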


#elif defined(GC_DGUX386_THREADS)
  /* Return the number of processors, or <= 0 if it cannot be determined. */
  STATIC int GC_get_nprocs(void)
  {
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0) {
       /* set -1 for error */
       numCpus = -1;
    } else {
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;
    }
    return numCpus;
  }

#elif defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) \
      || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
  STATIC int GC_get_nprocs(void)
  {
    int mib[] = {CTL_HW,HW_NCPU};
    int res;
    size_t len = sizeof(res);

    sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
    return res;
  }

#else
  /* E.g., GC_RTEMS_PTHREADS */
# define GC_get_nprocs() 1 /* not implemented */
#endif /* !GC_LINUX_THREADS && !GC_DARWIN_THREADS && ... */

#if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
  /* Some buggy Linux/arm kernels show only non-sleeping CPUs in        */
  /* /proc/stat (and /proc/cpuinfo), so another system data source is   */
  /* tried first.  Result <= 0 on error.                                */
  STATIC int GC_get_nprocs_present(void)
  {
    char stat_buf[16];
    int f;
    int len;

    f = open("/sys/devices/system/cpu/present", O_RDONLY);
    if (f < 0)
      return -1; /* cannot open the file */

    len = STAT_READ(f, stat_buf, sizeof(stat_buf));
    close(f);

    /* Recognized file format: "0\n" or "0-<max_cpu_id>\n"      */
    /* The file could contain a comma-separated list, but we    */
    /* do not need to handle it (just silently ignore).         */
    if (len < 2 || stat_buf[0] != '0' || stat_buf[len - 1] != '\n') {
      return 0; /* read error or unrecognized content */
    } else if (len == 2) {
      return 1; /* a uniprocessor */
    } else if (stat_buf[1] != '-') {
      return 0; /* unrecognized content */
    }

    stat_buf[len - 1] = '\0'; /* terminate the string */
    return atoi(&stat_buf[2]) + 1; /* skip "0-" and parse max_cpu_num */
  }
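  /* For example, a file content of "0-7\n" yields atoi("7") + 1 = 8,  */
  /* while "0\n" (len == 2) denotes a uniprocessor.                    */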
#endif /* ARM32 && GC_LINUX_THREADS && !NACL */

#if defined(CAN_HANDLE_FORK) && defined(THREAD_SANITIZER)
# include "private/gc_pmark.h" /* for MS_NONE */

  /* Workaround for TSan which does not notice that the GC lock */
  /* is acquired in fork_prepare_proc().                        */
  GC_ATTR_NO_SANITIZE_THREAD
  static GC_bool collection_in_progress(void)
  {
    return GC_mark_state != MS_NONE;
  }
#else
# define collection_in_progress() GC_collection_in_progress()
#endif

/* We hold the GC lock.  Wait until an in-progress GC has finished.     */
/* Repeatedly releases the GC lock in order to wait.                    */
/* If wait_for_all is true, then we exit with the GC lock held and no   */
/* collection in progress; otherwise we just wait for the current GC    */
/* to finish.                                                           */
GC_INNER void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
# if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
    /* GC_lock_holder is accessed with the lock held, so there is no    */
    /* data race actually (unlike what is reported by TSan).            */
    GC_ASSERT(I_HOLD_LOCK());
# endif
  ASSERT_CANCEL_DISABLED();
# ifdef GC_DISABLE_INCREMENTAL
    (void)wait_for_all;
# else
    if (GC_incremental && collection_in_progress()) {
        word old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark     */
        /* stack, since it's about to be unmapped.                      */
        do {
            ENTER_GC();
            GC_ASSERT(!GC_in_thread_creation);
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();

            UNLOCK();
#           ifdef GC_WIN32_THREADS
              Sleep(0);
#           else
              sched_yield();
#           endif
            LOCK();
        } while (GC_incremental && collection_in_progress()
                 && (wait_for_all || old_gc_no == GC_gc_no));
    }
# endif
}

#ifdef CAN_HANDLE_FORK

  /* Procedures called before and after a fork.  The goal here is to    */
  /* make it safe to call GC_malloc() in a forked child.  It is unclear */
  /* whether that is attainable, since the single UNIX spec seems to    */
  /* imply that one should only call async-signal-safe functions, and   */
  /* we probably cannot quite guarantee that.  But we give it our best  */
  /* shot.  (That                                                       */
1209
  /* same spec also implies that it is not safe to call the system      */
1210
  /* malloc between fork and exec.  Thus we're doing no worse than it.) */
1211

1212
  IF_CANCEL(static int fork_cancel_state;) /* protected by allocation lock */
1213

1214
# ifdef PARALLEL_MARK
1215
#   ifdef THREAD_SANITIZER
1216
#     if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
1217
        STATIC void GC_generic_lock(pthread_mutex_t *);
1218
#     endif
1219
      GC_ATTR_NO_SANITIZE_THREAD
1220
      static void wait_for_reclaim_atfork(void);
1221
#   else
1222
#     define wait_for_reclaim_atfork() GC_wait_for_reclaim()
1223
#   endif
1224
# endif /* PARALLEL_MARK */
1225

1226
  /* Prevent TSan false positive about the race during items removal    */
1227
  /* from GC_threads.  (The race cannot happen since only one thread    */
1228
  /* survives in the child.)                                            */
1229
# ifdef CAN_CALL_ATFORK
1230
    GC_ATTR_NO_SANITIZE_THREAD
1231
# endif
1232
  static void store_to_threads_table(int hv, GC_thread me)
10,794✔
1233
  {
1234
    GC_threads[hv] = me;
10,794✔
1235
  }
10,794✔
1236

1237
  /* Remove all entries from the GC_threads table, except the one for   */
1238
  /* the current thread.  We need to do this in the child process after */
1239
  /* a fork(), since only the current thread survives in the child.     */
1240
  STATIC void GC_remove_all_threads_but_me(void)
42✔
1241
  {
1242
    int hv;
1243
    GC_thread me = NULL;
42✔
1244
    pthread_t self = pthread_self(); /* same as in parent */
42✔
1245
#   ifndef GC_WIN32_THREADS
1246
#     define pthread_id id
1247
#   endif
1248

1249
    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
10,794✔
1250
      GC_thread p, next;
1251

1252
      for (p = GC_threads[hv]; p != NULL; p = next) {
11,634✔
1253
        next = p -> tm.next;
882✔
1254
        if (THREAD_EQUAL(p -> pthread_id, self)
882✔
1255
            && me == NULL) { /* ignore dead threads with the same id */
42✔
1256
          me = p;
42✔
1257
          p -> tm.next = NULL;
42✔
1258
        } else {
1259
#         ifdef THREAD_LOCAL_ALLOC
1260
            if (!KNOWN_FINISHED(p)) {
840✔
1261
              /* Cannot call GC_destroy_thread_local here.  The free    */
1262
              /* lists may be in an inconsistent state (as thread p may */
1263
              /* be updating one of the lists by GC_generic_malloc_many */
1264
              /* or GC_FAST_MALLOC_GRANS when fork is invoked).         */
1265
              /* This should not be a problem because the lost elements */
1266
              /* of the free lists will be collected during GC.         */
1267
              GC_remove_specific_after_fork(GC_thread_key, p -> pthread_id);
840✔
1268
            }
1269
#         endif
1270
          /* TODO: To avoid TSan hang (when updating GC_bytes_freed),   */
1271
          /* we just skip explicit freeing of GC_threads entries.       */
1272
#         if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
1273
            if (p != &first_thread) {
840✔
1274
              /* TODO: Should call mach_port_deallocate? */
1275
              GC_ASSERT(p -> crtn != &first_crtn);
800✔
1276
              GC_INTERNAL_FREE(p -> crtn);
800✔
1277
              GC_INTERNAL_FREE(p);
800✔
1278
            }
1279
#         endif
1280
        }
1281
      }
1282
      store_to_threads_table(hv, NULL);
10,752✔
1283
    }
1284

1285
#   ifdef LINT2
1286
      if (NULL == me) ABORT("Current thread is not found after fork");
1287
#   else
1288
      GC_ASSERT(me != NULL);
42✔
1289
#   endif
1290
#   ifdef GC_WIN32_THREADS
1291
      /* Update Win32 thread id and handle.     */
1292
      me -> id = thread_id_self(); /* differs from that in parent */
1293
#     ifndef MSWINCE
1294
        if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
1295
                        GetCurrentProcess(), (HANDLE *)&(me -> handle),
1296
                        0 /* dwDesiredAccess */, FALSE /* bInheritHandle */,
1297
                        DUPLICATE_SAME_ACCESS))
1298
          ABORT("DuplicateHandle failed");
1299
#     endif
1300
#   endif
1301
#   ifdef GC_DARWIN_THREADS
1302
      /* Update thread Id after fork (it is OK to call  */
1303
      /* GC_destroy_thread_local and GC_free_inner      */
1304
      /* before update).                                */
1305
      me -> mach_thread = mach_thread_self();
1306
#   endif
1307
#   ifdef USE_TKILL_ON_ANDROID
1308
      me -> kernel_id = gettid();
1309
#   endif
1310

1311
    /* Put "me" back to GC_threads.     */
1312
    store_to_threads_table(THREAD_TABLE_INDEX(me -> id), me);
42✔
1313

1314
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_CUSTOM_SPECIFIC)
1315
      /* Some TLS implementations (e.g., on Cygwin) might not be        */
      /* fork-friendly, so we re-assign thread-local pointer to 'tlfs'  */
      /* for safety instead of the assertion check (again, it is OK to  */
      /* call GC_destroy_thread_local and GC_free_inner before).        */
      {
        int res = GC_setspecific(GC_thread_key, &me->tlfs);

        if (COVERT_DATAFLOW(res) != 0)
          ABORT("GC_setspecific failed (in child)");
      }
#   endif
#   undef pthread_id
  }

  /* Called before a fork().    */
# if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
    /* GC_lock_holder is updated safely (no data race actually).        */
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void fork_prepare_proc(void)
  {
    /* Acquire all relevant locks, so that after releasing the locks    */
    /* the child will see a consistent state in which monitor           */
    /* invariants hold.  Unfortunately, we can't acquire libc locks     */
    /* we might need, and there seems to be no guarantee that libc      */
    /* must install a suitable fork handler.                            */
    /* Wait for an ongoing GC to finish, since we can't finish it in    */
    /* the (one remaining thread in) the child.                         */

      LOCK();
      DISABLE_CANCEL(fork_cancel_state);
                /* Following waits may include cancellation points. */
#     ifdef PARALLEL_MARK
        if (GC_parallel)
          wait_for_reclaim_atfork();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     ifdef PARALLEL_MARK
        if (GC_parallel) {
#         if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
             && defined(CAN_CALL_ATFORK)
            /* Prevent TSan false positive about the data race  */
            /* when updating GC_mark_lock_holder.               */
            GC_generic_lock(&mark_mutex);
#         else
            GC_acquire_mark_lock();
#         endif
        }
#     endif
      GC_acquire_dirty_lock();
  }

  /* Called in parent after a fork() (even if the latter failed).       */
# if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void fork_parent_proc(void)
  {
    GC_release_dirty_lock();
#   ifdef PARALLEL_MARK
      if (GC_parallel) {
#       if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
           && defined(CAN_CALL_ATFORK)
          /* To match that in fork_prepare_proc. */
          (void)pthread_mutex_unlock(&mark_mutex);
#       else
          GC_release_mark_lock();
#       endif
      }
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
  }

  /* Called in child after a fork().    */
# if defined(GC_ASSERTIONS) && defined(CAN_CALL_ATFORK)
    GC_ATTR_NO_SANITIZE_THREAD
# endif
  static void fork_child_proc(void)
  {
    GC_release_dirty_lock();
#   ifdef PARALLEL_MARK
      if (GC_parallel) {
#       if defined(THREAD_SANITIZER) && defined(GC_ASSERTIONS) \
           && defined(CAN_CALL_ATFORK)
          (void)pthread_mutex_unlock(&mark_mutex);
#       else
          GC_release_mark_lock();
#       endif
        /* Turn off parallel marking in the child, since we are probably  */
        /* just going to exec, and we would have to restart mark threads. */
        GC_parallel = FALSE;
      }
#     ifdef THREAD_SANITIZER
        /* TSan does not support threads creation in the child process. */
        GC_available_markers_m1 = 0;
#     endif
#   endif
    /* Clean up the thread table, so that just our thread is left.      */
    GC_remove_all_threads_but_me();
#   ifndef GC_DISABLE_INCREMENTAL
      GC_dirty_update_child();
#   endif
    RESTORE_CANCEL(fork_cancel_state);
    UNLOCK();
    /* Even though after a fork the child only inherits the single      */
    /* thread that called the fork(), if another thread in the parent   */
1423
    /* fork_child_prepare(), the mutex will be left in an inconsistent  */
1424
    /* state in the child after the UNLOCK.  This is the case, at       */
1425
    /* least, in Mac OS X and leads to an unusable GC in the child      */
1426
    /* which will block when attempting to perform any GC operation     */
1427
    /* that acquires the allocation mutex.                              */
1428
#   if defined(USE_PTHREAD_LOCKS) && !defined(GC_WIN32_THREADS)
1429
      GC_ASSERT(I_DONT_HOLD_LOCK());
42✔
1430
      /* Reinitialize the mutex.  It should be safe since we are        */
1431
      /* running this in the child which only inherits a single thread. */
1432
      /* mutex_destroy() may return EBUSY, which makes no sense, but    */
1433
      /* that is the reason for the need of the reinitialization.       */
1434
      /* Note: excluded for Cygwin as does not seem to be needed.       */
1435
      (void)pthread_mutex_destroy(&GC_allocate_ml);
42✔
1436
      /* TODO: Probably some targets might need the default mutex       */
1437
      /* attribute to be passed instead of NULL.                        */
1438
      if (0 != pthread_mutex_init(&GC_allocate_ml, NULL))
42✔
1439
        ABORT("pthread_mutex_init failed (in child)");
×
1440
#   endif
1441
  }
42✔
1442

1443
  /* Routines for fork handling by client (no-op if pthread_atfork works). */
1444
  GC_API void GC_CALL GC_atfork_prepare(void)
42✔
1445
  {
1446
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
42✔
1447
#   if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
1448
      if (GC_auto_incremental) {
1449
        GC_ASSERT(0 == GC_handle_fork);
1450
        ABORT("Unable to fork while mprotect_thread is running");
1451
      }
1452
#   endif
1453
    if (GC_handle_fork <= 0)
42✔
1454
      fork_prepare_proc();
×
1455
  }
42✔
1456

1457
  GC_API void GC_CALL GC_atfork_parent(void)
42✔
1458
  {
1459
    if (GC_handle_fork <= 0)
42✔
1460
      fork_parent_proc();
×
1461
  }
42✔
1462

1463
  GC_API void GC_CALL GC_atfork_child(void)
42✔
1464
  {
1465
    if (GC_handle_fork <= 0)
42✔
1466
      fork_child_proc();
×
1467
  }
42✔
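
  /* A minimal usage sketch (hypothetical client code, not compiled     */
  /* here): if pthread_atfork() cannot be used, the client is expected  */
  /* to bracket fork() with the three calls above.                      */
# if 0
    static pid_t fork_with_gc(void)
    {
      pid_t pid;

      GC_atfork_prepare();
      pid = fork();
      if (0 == pid) {
        GC_atfork_child();          /* in the child process            */
      } else {
        GC_atfork_parent();         /* in the parent, even on failure  */
      }
      return pid;
    }
# endif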

  /* Prepare for forks if requested.    */
  GC_INNER_WIN32THREAD void GC_setup_atfork(void)
  {
    if (GC_handle_fork) {
#     ifdef CAN_CALL_ATFORK
        if (pthread_atfork(fork_prepare_proc, fork_parent_proc,
                           fork_child_proc) == 0) {
          /* Handlers successfully registered.  */
          GC_handle_fork = 1;
        } else
#     endif
      /* else */ if (GC_handle_fork != -1)
        ABORT("pthread_atfork failed");
    }
  }

#endif /* CAN_HANDLE_FORK */

#ifdef INCLUDE_LINUX_THREAD_DESCR
  __thread int GC_dummy_thread_local;
#endif

#ifdef PARALLEL_MARK
# ifndef GC_WIN32_THREADS
    static void setup_mark_lock(void);
# endif

  GC_INNER_WIN32THREAD unsigned GC_required_markers_cnt = 0;
                        /* The default value (0) means the number of    */
                        /* markers should be selected automatically.    */

  GC_API void GC_CALL GC_set_markers_count(unsigned markers)
  {
    GC_required_markers_cnt = markers < MAX_MARKERS ? markers : MAX_MARKERS;
  }
#endif /* PARALLEL_MARK */
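
/* Usage sketch (hypothetical client code): the desired marker count    */
/* should be set before GC initialization; the value is clamped to      */
/* MAX_MARKERS, and the GC_MARKERS environment variable, if set, takes  */
/* precedence (see GC_thr_init below).                                  */
#if 0
  static void client_init_markers(void)
  {
    GC_set_markers_count(4); /* ask for 4 parallel mark threads */
    GC_INIT();
  }
#endif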

GC_INNER GC_bool GC_in_thread_creation = FALSE;
                                /* Protected by allocation lock. */

GC_INNER_WIN32THREAD void GC_record_stack_base(GC_stack_context_t crtn,
                                               const struct GC_stack_base *sb)
{
# if !defined(GC_DARWIN_THREADS) && !defined(GC_WIN32_THREADS)
    crtn -> stack_ptr = (ptr_t)sb->mem_base;
# endif
  if ((crtn -> stack_end = (ptr_t)sb->mem_base) == NULL)
    ABORT("Bad stack base in GC_register_my_thread");
# ifdef IA64
    crtn -> backing_store_end = (ptr_t)sb->reg_base;
# elif defined(I386) && defined(GC_WIN32_THREADS)
    crtn -> initial_stack_base = (ptr_t)sb->mem_base;
# endif
}

#if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS) \
    || !defined(DONT_USE_ATEXIT)
  GC_INNER_WIN32THREAD thread_id_t GC_main_thread_id;
#endif

#ifndef DONT_USE_ATEXIT
  GC_INNER GC_bool GC_is_main_thread(void)
  {
    GC_ASSERT(GC_thr_initialized);
    return thread_id_self() == GC_main_thread_id;
  }
#endif /* !DONT_USE_ATEXIT */

#ifndef GC_WIN32_THREADS

STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
                                             thread_id_t self_id)
{
  GC_thread me;

  GC_ASSERT(I_HOLD_LOCK());
  me = GC_new_thread(self_id);
  me -> id = self_id;
# ifdef GC_DARWIN_THREADS
    me -> mach_thread = mach_thread_self();
# endif
  GC_record_stack_base(me -> crtn, sb);
  return me;
}

  STATIC int GC_nprocs = 1;
                        /* Number of processors.  We may not have       */
                        /* access to all of them, but this is as good   */
                        /* a guess as any ...                           */

GC_INNER void GC_thr_init(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(!GC_thr_initialized);
  GC_ASSERT((word)(&GC_threads) % sizeof(word) == 0);
# ifdef GC_ASSERTIONS
    GC_thr_initialized = TRUE;
# endif
# ifdef CAN_HANDLE_FORK
    GC_setup_atfork();
# endif

# ifdef INCLUDE_LINUX_THREAD_DESCR
    /* Explicitly register the region including the address     */
    /* of a thread local variable.  This should include thread  */
    /* locals for the main thread, except for those allocated   */
    /* in response to dlopen calls.                             */
    {
      ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local);
      ptr_t main_thread_start, main_thread_end;
      if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
                                &main_thread_end)) {
        ABORT("Failed to find mapping for main thread thread locals");
      } else {
        /* main_thread_start and main_thread_end are initialized.       */
        GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
      }
    }
# endif

  /* Set GC_nprocs and GC_available_markers_m1. */
  {
    char * nprocs_string = GETENV("GC_NPROCS");
    GC_nprocs = -1;
    if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
  }
  if (GC_nprocs <= 0
#     if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
        && (GC_nprocs = GC_get_nprocs_present()) <= 1
                                /* Workaround for some Linux/arm kernels */
#     endif
      )
  {
    GC_nprocs = GC_get_nprocs();
  }
  if (GC_nprocs <= 0) {
    WARN("GC_get_nprocs() returned %" WARN_PRIdPTR "\n",
         (signed_word)GC_nprocs);
    GC_nprocs = 2; /* assume dual-core */
#   ifdef PARALLEL_MARK
      GC_available_markers_m1 = 0; /* but use only one marker */
#   endif
  } else {
#   ifdef PARALLEL_MARK
      {
        char * markers_string = GETENV("GC_MARKERS");
        int markers = GC_required_markers_cnt;

        if (markers_string != NULL) {
          markers = atoi(markers_string);
          if (markers <= 0 || markers > MAX_MARKERS) {
            WARN("Too big or invalid number of mark threads: %" WARN_PRIdPTR
                 "; using maximum threads\n", (signed_word)markers);
            markers = MAX_MARKERS;
          }
        } else if (0 == markers) {
          /* Unless the client sets the desired number of       */
          /* parallel markers, it is determined based on the    */
          /* number of CPU cores.                               */
          markers = GC_nprocs;
#         if defined(GC_MIN_MARKERS) && !defined(CPPCHECK)
            /* This is primarily for targets without getenv().  */
            if (markers < GC_MIN_MARKERS)
              markers = GC_MIN_MARKERS;
#         endif
          if (markers > MAX_MARKERS)
            markers = MAX_MARKERS; /* silently limit the value */
        }
        GC_available_markers_m1 = markers - 1;
      }
#   endif
  }
  GC_COND_LOG_PRINTF("Number of processors: %d\n", GC_nprocs);

# if defined(BASE_ATOMIC_OPS_EMULATED) && defined(SIGNAL_BASED_STOP_WORLD)
    /* Ensure the process is running on just one CPU core.      */
    /* This is needed because the AO primitives emulated with   */
    /* locks cannot be used inside signal handlers.             */
    {
      cpu_set_t mask;
      int cpu_set_cnt = 0;
      int cpu_lowest_set = 0;
      int i = GC_nprocs > 1 ? GC_nprocs : 2; /* check at least 2 cores */

      if (sched_getaffinity(0 /* current process */,
                            sizeof(mask), &mask) == -1)
        ABORT_ARG1("sched_getaffinity failed", ": errno= %d", errno);
      while (i-- > 0)
        if (CPU_ISSET(i, &mask)) {
          cpu_lowest_set = i;
          cpu_set_cnt++;
        }
      if (0 == cpu_set_cnt)
        ABORT("sched_getaffinity returned empty mask");
      if (cpu_set_cnt > 1) {
        CPU_ZERO(&mask);
        CPU_SET(cpu_lowest_set, &mask); /* select just one CPU */
        if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
          ABORT_ARG1("sched_setaffinity failed", ": errno= %d", errno);
        WARN("CPU affinity mask is set to %p\n", (word)1 << cpu_lowest_set);
      }
    }
# endif /* BASE_ATOMIC_OPS_EMULATED */

# ifndef GC_DARWIN_THREADS
    GC_stop_init();
# endif

# ifdef PARALLEL_MARK
    if (GC_available_markers_m1 <= 0) {
      /* Disable parallel marking.      */
      GC_parallel = FALSE;
      GC_COND_LOG_PRINTF(
                "Single marker thread, turning off parallel marking\n");
    } else {
      setup_mark_lock();
    }
# endif

  /* Add the initial thread, so we can stop it. */
  {
    struct GC_stack_base sb;
    GC_thread me;
    thread_id_t self_id = thread_id_self();

    sb.mem_base = GC_stackbottom;
    GC_ASSERT(sb.mem_base != NULL);
#   ifdef IA64
      sb.reg_base = GC_register_stackbottom;
#   elif defined(E2K)
      sb.reg_base = NULL;
#   endif
    GC_ASSERT(NULL == GC_self_thread_inner());
    me = GC_register_my_thread_inner(&sb, self_id);
#   ifndef DONT_USE_ATEXIT
      GC_main_thread_id = self_id;
#   endif
    me -> flags = DETACHED;
  }
}

#endif /* !GC_WIN32_THREADS */
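
/* Usage sketch (hypothetical client code): GC_thr_init() above honors  */
/* the GC_NPROCS and GC_MARKERS environment variables; they must be set */
/* before the collector is initialized to have any effect.  (setenv()   */
/* here is just for illustration; normally these come from the shell.)  */
#if 0
  static void client_env_knobs(void)
  {
    setenv("GC_NPROCS", "4", 1);  /* override the detected CPU count */
    setenv("GC_MARKERS", "2", 1); /* request two mark threads        */
    GC_INIT();
  }
#endif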

/* Perform all initializations, including those that may require        */
/* allocation, e.g. initialize thread local free lists if used.         */
/* Must be called before a thread is created.                           */
GC_INNER void GC_init_parallel(void)
{
# ifdef THREAD_LOCAL_ALLOC
    GC_thread me;

    GC_ASSERT(GC_is_initialized);
    LOCK();
    me = GC_self_thread_inner();
    GC_init_thread_local(&me->tlfs);
    UNLOCK();
# endif
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
    if (GC_win32_dll_threads) {
      set_need_to_lock();
        /* Cannot intercept thread creation.  Hence we don't know if    */
        /* other threads exist.  However, the client is not allowed to  */
        /* create other threads before collector initialization.        */
        /* Thus it's OK not to lock before this.                        */
    }
# endif
}

#if !defined(GC_NO_PTHREAD_SIGMASK) && defined(GC_PTHREADS)
  GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
                                        sigset_t *oset)
  {
#   ifdef GC_WIN32_THREADS
      /* pthreads-win32 does not support sigmask.       */
      /* So, nothing required here...                   */
#   else
      sigset_t fudged_set;

      INIT_REAL_SYMS();
      if (EXPECT(set != NULL, TRUE)
          && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        int sig_suspend = GC_get_suspend_signal();

        fudged_set = *set;
        GC_ASSERT(sig_suspend >= 0);
        if (sigdelset(&fudged_set, sig_suspend) != 0)
          ABORT("sigdelset failed");
        set = &fudged_set;
      }
#   endif
    return REAL_FUNC(pthread_sigmask)(how, set, oset);
  }
#endif /* !GC_NO_PTHREAD_SIGMASK */
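
/* Usage sketch (hypothetical client code): even a thread that tries to */
/* block all signals keeps the collector's suspend signal deliverable,  */
/* because the wrapper above removes that signal from the new mask.     */
#if 0
  static void client_block_signals(void)
  {
    sigset_t all;

    sigfillset(&all);
    (void)pthread_sigmask(SIG_BLOCK, &all, NULL); /* intercepted above */
  }
#endif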

/* Wrapper for functions that are likely to block for an appreciable    */
/* length of time.                                                      */

static GC_bool do_blocking_enter(GC_thread me)
{
#   if defined(SPARC) || defined(IA64)
        ptr_t bs_hi = GC_save_regs_in_stack();
        /* TODO: regs saving already done by GC_with_callee_saves_pushed */
#   elif defined(E2K)
        size_t stack_size;
#   endif
    GC_stack_context_t crtn = me -> crtn;
    GC_bool topOfStackUnset = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((me -> flags & DO_BLOCKING) == 0);
#   ifdef SPARC
        crtn -> stack_ptr = bs_hi;
#   else
        crtn -> stack_ptr = GC_approx_sp();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
        if (NULL == crtn -> topOfStack) {
            /* GC_do_blocking_inner is not called recursively,  */
            /* so topOfStack should be computed now.            */
            topOfStackUnset = TRUE;
            crtn -> topOfStack = GC_FindTopOfStack(0);
        }
#   endif
#   ifdef IA64
        crtn -> backing_store_ptr = bs_hi;
#   elif defined(E2K)
        GC_ASSERT(NULL == crtn -> backing_store_end);
        stack_size = GC_alloc_and_get_procedure_stack(
                                        &(crtn -> backing_store_end));
        crtn -> backing_store_ptr = crtn -> backing_store_end + stack_size;
#   endif
    me -> flags |= DO_BLOCKING;
    /* Save context here if we want to support precise stack marking.   */
    return topOfStackUnset;
}

static void do_blocking_leave(GC_thread me, GC_bool topOfStackUnset)
{
    GC_ASSERT(I_HOLD_LOCK());
    me -> flags &= ~DO_BLOCKING;
#   ifdef E2K
      {
        GC_stack_context_t crtn = me -> crtn;

        GC_ASSERT(crtn -> backing_store_end != NULL);
        /* Note that the value of backing_store_end here may differ     */
        /* from the one stored in this function previously.             */
        GC_INTERNAL_FREE(crtn -> backing_store_end);
        crtn -> backing_store_ptr = NULL;
        crtn -> backing_store_end = NULL;
      }
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
        if (topOfStackUnset)
          me -> crtn -> topOfStack = NULL; /* make it unset again */
#   else
        (void)topOfStackUnset;
#   endif
}

GC_INNER void GC_do_blocking_inner(ptr_t data, void *context)
{
    struct blocking_data *d = (struct blocking_data *)data;
    GC_thread me;
    GC_bool topOfStackUnset;

    UNUSED_ARG(context);
    LOCK();
    me = GC_self_thread_inner();
    topOfStackUnset = do_blocking_enter(me);
    UNLOCK();

    d -> client_data = (d -> fn)(d -> client_data);

    LOCK();   /* This will block if the world is stopped.       */
#   ifdef LINT2
      {
#        ifdef GC_ASSERTIONS
           GC_thread saved_me = me;
#        endif

         /* The pointer to the GC thread descriptor should not be   */
         /* changed while the thread is registered but a static     */
         /* analysis tool might complain that this pointer value    */
         /* (obtained in the first locked section) is unreliable in */
         /* the second locked section.                              */
         me = GC_self_thread_inner();
         GC_ASSERT(me == saved_me);
      }
#   endif
#   if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
      /* Note: this code cannot be moved into do_blocking_leave()   */
      /* otherwise there could be a static analysis tool warning    */
      /* (false positive) about unlock without a matching lock.     */
      while (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) {
        word suspend_cnt = (word)(me -> ext_suspend_cnt);
                        /* read suspend counter (number) before unlocking */

        UNLOCK();
        GC_suspend_self_inner(me, suspend_cnt);
        LOCK();
      }
#   endif
    do_blocking_leave(me, topOfStackUnset);
    UNLOCK();
}
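
/* Usage sketch (hypothetical client code): GC_do_blocking() is the     */
/* public entry point that ends up in GC_do_blocking_inner() above.     */
/* The callback must not call GC routines or manipulate pointers into   */
/* the GC heap; "my_sem" is a placeholder.                              */
#if 0
  static void *wait_on_semaphore(void *arg)
  {
    sem_wait((sem_t *)arg); /* may block indefinitely; no GC activity */
    return NULL;
  }

  /* In some registered thread:                                 */
  /* (void)GC_do_blocking(wait_on_semaphore, (void *)&my_sem);  */
#endif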

#if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
  /* Similar to GC_do_blocking_inner() but assuming the GC lock is held */
  /* and fn is GC_suspend_self_inner.                                   */
  GC_INNER void GC_suspend_self_blocked(ptr_t thread_me, void *context)
  {
    GC_thread me = (GC_thread)thread_me;
    GC_bool topOfStackUnset;

    UNUSED_ARG(context);
    GC_ASSERT(I_HOLD_LOCK());
    topOfStackUnset = do_blocking_enter(me);
    while ((me -> ext_suspend_cnt & 1) != 0) {
      word suspend_cnt = (word)(me -> ext_suspend_cnt);

      UNLOCK();
      GC_suspend_self_inner(me, suspend_cnt);
      LOCK();
    }
    do_blocking_leave(me, topOfStackUnset);
  }
#endif /* GC_ENABLE_SUSPEND_THREAD */

GC_API void GC_CALL GC_set_stackbottom(void *gc_thread_handle,
                                       const struct GC_stack_base *sb)
{
    GC_thread t = (GC_thread)gc_thread_handle;
    GC_stack_context_t crtn;

    GC_ASSERT(sb -> mem_base != NULL);
    if (!EXPECT(GC_is_initialized, TRUE)) {
      GC_ASSERT(NULL == t);
      /* Alter the stack bottom of the primordial thread.       */
      GC_stackbottom = (char*)(sb -> mem_base);
#     ifdef IA64
        GC_register_stackbottom = (ptr_t)(sb -> reg_base);
#     endif
      return;
    }

    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == t) /* current thread? */
      t = GC_self_thread_inner();
    GC_ASSERT(!KNOWN_FINISHED(t));
    crtn = t -> crtn;
    GC_ASSERT((t -> flags & DO_BLOCKING) == 0
              && NULL == crtn -> traced_stack_sect); /* for now */

    crtn -> stack_end = (ptr_t)(sb -> mem_base);
#   ifdef IA64
      crtn -> backing_store_end = (ptr_t)(sb -> reg_base);
#   endif
#   ifdef GC_WIN32_THREADS
      /* Reset the known minimum (hottest address in the stack). */
      crtn -> last_stack_min = ADDR_LIMIT;
#   endif
}

GC_API void * GC_CALL GC_get_my_stackbottom(struct GC_stack_base *sb)
{
    GC_thread me;
    GC_stack_context_t crtn;

    LOCK();
    me = GC_self_thread_inner();
    /* The thread is assumed to be registered.  */
    crtn = me -> crtn;
    sb -> mem_base = crtn -> stack_end;
#   ifdef IA64
      sb -> reg_base = crtn -> backing_store_end;
#   endif
#   ifdef E2K
      sb -> reg_base = NULL;
#   endif
    UNLOCK();
    return (void *)me; /* gc_thread_handle */
}
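
/* Usage sketch (hypothetical client code): the two routines above are  */
/* meant to be paired by clients that switch stacks (e.g. coroutines).  */
/* GC_set_stackbottom() expects the GC lock to be held, hence the       */
/* GC_call_with_alloc_lock() wrapper in this sketch.                    */
#if 0
  struct coro_stack {
    void *handle;               /* from GC_get_my_stackbottom()   */
    struct GC_stack_base sb;    /* base of the stack to switch to */
  };

  static void *set_stackbottom_cb(void *arg)
  {
    struct coro_stack *cs = (struct coro_stack *)arg;

    GC_set_stackbottom(cs -> handle, &(cs -> sb));
    return NULL;
  }

  /* cs.handle = GC_get_my_stackbottom(&cs.sb);  ... on a switch ... */
  /* (void)GC_call_with_alloc_lock(set_stackbottom_cb, &cs);         */
#endif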

/* GC_call_with_gc_active() has functionality opposite to that of       */
/* GC_do_blocking().  It might be called from a user function invoked   */
/* by GC_do_blocking() to temporarily re-enable calling GC functions    */
/* and/or manipulating pointers to the garbage-collected heap.          */
GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
                                             void * client_data)
{
    struct GC_traced_stack_sect_s stacksect;
    GC_thread me;
    GC_stack_context_t crtn;
    ptr_t stack_end;
#   ifdef E2K
      size_t stack_size;
#   endif

    LOCK();   /* This will block if the world is stopped.       */
    me = GC_self_thread_inner();
    crtn = me -> crtn;

    /* Adjust our stack bottom value (an adjustment is unneeded only    */
    /* if GC_get_stack_base() was used and it returned GC_SUCCESS).     */
    stack_end = crtn -> stack_end; /* read of a volatile field */
    GC_ASSERT(stack_end != NULL);
    if ((word)stack_end HOTTER_THAN (word)(&stacksect)) {
      crtn -> stack_end = (ptr_t)(&stacksect);
#     if defined(I386) && defined(GC_WIN32_THREADS)
        crtn -> initial_stack_base = (ptr_t)(&stacksect);
#     endif
    }

    if ((me -> flags & DO_BLOCKING) == 0) {
      /* We are not inside GC_do_blocking() - do nothing more.  */
      UNLOCK();
      client_data = fn(client_data);
      /* Prevent treating the above as a tail call.     */
      GC_noop1(COVERT_DATAFLOW(&stacksect));
      return client_data; /* result */
    }

#   if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
      while (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) {
        word suspend_cnt = (word)(me -> ext_suspend_cnt);
        UNLOCK();
        GC_suspend_self_inner(me, suspend_cnt);
        LOCK();
        GC_ASSERT(me -> crtn == crtn);
      }
#   endif

    /* Setup new "stack section".       */
    stacksect.saved_stack_ptr = crtn -> stack_ptr;
#   ifdef IA64
      /* This is the same as in GC_call_with_stack_base().      */
      stacksect.backing_store_end = GC_save_regs_in_stack();
      /* Unnecessarily flushes register stack,          */
      /* but that probably doesn't hurt.                */
      stacksect.saved_backing_store_ptr = crtn -> backing_store_ptr;
#   elif defined(E2K)
      GC_ASSERT(crtn -> backing_store_end != NULL);
      GC_INTERNAL_FREE(crtn -> backing_store_end);
      crtn -> backing_store_ptr = NULL;
      crtn -> backing_store_end = NULL;
#   endif
    stacksect.prev = crtn -> traced_stack_sect;
    me -> flags &= ~DO_BLOCKING;
    crtn -> traced_stack_sect = &stacksect;

    UNLOCK();
    client_data = fn(client_data);
    GC_ASSERT((me -> flags & DO_BLOCKING) == 0);

    /* Restore original "stack section".        */
#   ifdef E2K
      (void)GC_save_regs_in_stack();
#   endif
    LOCK();
    GC_ASSERT(me -> crtn == crtn);
    GC_ASSERT(crtn -> traced_stack_sect == &stacksect);
#   ifdef CPPCHECK
      GC_noop1((word)(crtn -> traced_stack_sect));
#   endif
    crtn -> traced_stack_sect = stacksect.prev;
#   ifdef IA64
      crtn -> backing_store_ptr = stacksect.saved_backing_store_ptr;
#   elif defined(E2K)
      GC_ASSERT(NULL == crtn -> backing_store_end);
      stack_size = GC_alloc_and_get_procedure_stack(
                                        &(crtn -> backing_store_end));
      crtn -> backing_store_ptr = crtn -> backing_store_end + stack_size;
#   endif
    me -> flags |= DO_BLOCKING;
    crtn -> stack_ptr = stacksect.saved_stack_ptr;
    UNLOCK();

    return client_data; /* result */
}
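
/* Usage sketch (hypothetical client code): re-entering "GC-active"     */
/* mode from inside a function running under GC_do_blocking().          */
#if 0
  static void *allocate_result(void *arg)
  {
    (void)arg;
    return GC_MALLOC(64); /* GC calls are legal again here */
  }

  static void *mostly_blocked(void *arg)
  {
    /* ... long blocking work, no GC calls ... */
    return GC_call_with_gc_active(allocate_result, arg);
  }

  /* (void)GC_do_blocking(mostly_blocked, NULL); */
#endif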

STATIC void GC_unregister_my_thread_inner(GC_thread me)
{
    GC_ASSERT(I_HOLD_LOCK());
#   ifdef DEBUG_THREADS
      GC_log_printf("Unregistering thread %p, gc_thread= %p, n_threads= %d\n",
                    (void *)(signed_word)(me -> id), (void *)me,
                    GC_count_threads());
#   endif
    GC_ASSERT(!KNOWN_FINISHED(me));
#   if defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(GC_getspecific(GC_thread_key) == &me->tlfs);
      GC_destroy_thread_local(&me->tlfs);
#   endif
#   ifdef NACL
      GC_nacl_shutdown_gc_thread();
#   endif
#   ifdef GC_PTHREADS
#     if defined(GC_HAVE_PTHREAD_EXIT) || !defined(GC_NO_PTHREAD_CANCEL)
        /* Handle DISABLED_GC flag which is set by the  */
        /* intercepted pthread_cancel or pthread_exit.  */
        if ((me -> flags & DISABLED_GC) != 0) {
          GC_dont_gc--;
        }
#     endif
      if ((me -> flags & DETACHED) == 0) {
          me -> flags |= FINISHED;
      } else
#   endif
    /* else */ {
      GC_delete_thread(me);
    }
#   if defined(THREAD_LOCAL_ALLOC)
      /* It is required to call remove_specific defined in specific.c. */
      GC_remove_specific(GC_thread_key);
#   endif
}

GC_API int GC_CALL GC_unregister_my_thread(void)
{
    GC_thread me;
    IF_CANCEL(int cancel_state;)

    /* Client should not unregister the thread explicitly if it */
    /* is registered by DllMain, except for the main thread.    */
#   if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
      GC_ASSERT(!GC_win32_dll_threads
                || GC_main_thread_id == thread_id_self());
#   endif

    LOCK();
    DISABLE_CANCEL(cancel_state);
    /* Wait for any GC that may be marking from our stack to    */
    /* complete before we remove this thread.                   */
    GC_wait_for_gc_completion(FALSE);
    me = GC_self_thread_inner();
#   ifdef DEBUG_THREADS
        GC_log_printf(
                "Called GC_unregister_my_thread on %p, gc_thread= %p\n",
                (void *)(signed_word)thread_id_self(), (void *)me);
#   endif
    GC_ASSERT(THREAD_ID_EQUAL(me -> id, thread_id_self()));
    GC_unregister_my_thread_inner(me);
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    return GC_SUCCESS;
}

#if !defined(GC_NO_PTHREAD_CANCEL) && defined(GC_PTHREADS)
  /* We should deal with the fact that apparently on Solaris and,       */
  /* probably, on some Linux systems we can't collect while a thread    */
  /* is exiting, since signals aren't handled properly.  This currently */
  /* gives rise to deadlocks.  The only workaround seen is to intercept */
  /* pthread_cancel() and pthread_exit(), and disable collections       */
  /* until the thread exit handler is called.  That's ugly, because we  */
  /* risk growing the heap unnecessarily.  But it seems that we don't   */
  /* really have an option, in that the process is not in a fully       */
  /* functional state while a thread is exiting.                        */
  GC_API int WRAP_FUNC(pthread_cancel)(pthread_t thread)
  {
#   ifdef CANCEL_SAFE
      GC_thread t;
#   endif

    INIT_REAL_SYMS();
#   ifdef CANCEL_SAFE
      LOCK();
      t = GC_lookup_by_pthread(thread);
      /* We test DISABLED_GC because pthread_exit could be called at    */
      /* the same time.  (If t is NULL then pthread_cancel should       */
      /* return ESRCH.)                                                 */
      if (t != NULL && (t -> flags & DISABLED_GC) == 0) {
        t -> flags |= DISABLED_GC;
        GC_dont_gc++;
      }
      UNLOCK();
#   endif
    return REAL_FUNC(pthread_cancel)(thread);
  }
#endif /* !GC_NO_PTHREAD_CANCEL */

#ifdef GC_HAVE_PTHREAD_EXIT
  GC_API GC_PTHREAD_EXIT_ATTRIBUTE void WRAP_FUNC(pthread_exit)(void *retval)
  {
    GC_thread me;

    INIT_REAL_SYMS();
    LOCK();
    me = GC_self_thread_inner();
    /* We test DISABLED_GC because someone else could call    */
    /* pthread_cancel at the same time.                       */
    if (me != NULL && (me -> flags & DISABLED_GC) == 0) {
      me -> flags |= DISABLED_GC;
      GC_dont_gc++;
    }
    UNLOCK();

    REAL_FUNC(pthread_exit)(retval);
  }
#endif /* GC_HAVE_PTHREAD_EXIT */

GC_API void GC_CALL GC_allow_register_threads(void)
{
  /* Check GC is initialized and the current thread is registered.  */
  GC_ASSERT(GC_self_thread() != NULL);

  INIT_REAL_SYMS(); /* to initialize symbols while single-threaded */
  GC_start_mark_threads();
  set_need_to_lock();
}

GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
{
    GC_thread me;

    if (GC_need_to_lock == FALSE)
        ABORT("Threads explicit registering is not previously enabled");

    /* We lock here, since we want to wait for an ongoing GC.   */
    LOCK();
    me = GC_self_thread_inner();
    if (EXPECT(NULL == me, TRUE)) {
      me = GC_register_my_thread_inner(sb, thread_id_self());
#     ifdef GC_PTHREADS
#       ifdef CPPCHECK
          GC_noop1(me -> flags);
#       endif
        /* Treat as detached, since we do not need to worry about       */
        /* pointer results.                                             */
        me -> flags |= DETACHED;
#     else
        (void)me;
#     endif
    } else
#   ifdef GC_PTHREADS
      /* else */ if (KNOWN_FINISHED(me)) {
        /* This code is executed when a thread is registered from the   */
        /* client thread key destructor.                                */
#       ifdef NACL
          GC_nacl_initialize_gc_thread(me);
#       endif
#       ifdef GC_DARWIN_THREADS
          /* Reinitialize mach_thread to avoid thread_suspend failing   */
          /* with the MACH_SEND_INVALID_DEST error.                     */
          me -> mach_thread = mach_thread_self();
#       endif
        GC_record_stack_base(me -> crtn, sb);
        me -> flags &= ~FINISHED; /* but not DETACHED */
      } else
#   endif
    /* else */ {
        UNLOCK();
        return GC_DUPLICATE;
    }

#   ifdef THREAD_LOCAL_ALLOC
      GC_init_thread_local(&me->tlfs);
#   endif
#   ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
      /* Since this could be executed from a thread destructor, */
      /* our signals might already be blocked.                  */
      GC_unblock_gc_signals();
#   endif
#   if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
      if (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) {
        GC_with_callee_saves_pushed(GC_suspend_self_blocked, (ptr_t)me);
      }
#   endif
    UNLOCK();
    return GC_SUCCESS;
}
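
/* Usage sketch (hypothetical client code): a thread created outside    */
/* the pthread_create() wrapper below (e.g. by a foreign library) can   */
/* register itself, provided GC_allow_register_threads() was called     */
/* earlier from a registered thread.                                    */
#if 0
  static void foreign_thread_body(void)
  {
    struct GC_stack_base sb;

    if (GC_get_stack_base(&sb) != GC_SUCCESS)
      ABORT("GC_get_stack_base failed");
    if (GC_register_my_thread(&sb) != GC_SUCCESS)
      ABORT("GC_register_my_thread failed");
    /* ... may allocate and hold GC-heap pointers here ... */
    (void)GC_unregister_my_thread();
  }
#endif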

#if defined(GC_PTHREADS) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)

  /* Called at thread exit.  Never called for main thread.      */
  /* That is OK, since it results in at most a tiny one-time    */
  /* leak.  And the linuxthreads implementation does not        */
  /* reclaim the primordial (main) thread resources or id       */
  /* anyway.                                                    */
  GC_INNER_PTHRSTART void GC_thread_exit_proc(void *arg)
  {
    GC_thread me = (GC_thread)arg;
    IF_CANCEL(int cancel_state;)

#   ifdef DEBUG_THREADS
        GC_log_printf("Called GC_thread_exit_proc on %p, gc_thread= %p\n",
                      (void *)(signed_word)(me -> id), (void *)me);
#   endif
    LOCK();
    DISABLE_CANCEL(cancel_state);
    GC_wait_for_gc_completion(FALSE);
    GC_unregister_my_thread_inner(me);
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
  }

  GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
  {
    int result;
    GC_thread t;

    INIT_REAL_SYMS();
#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p is joining thread %p\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()),
                    (void *)GC_PTHREAD_PTRVAL(thread));
#   endif

    /* After the join, thread id may have been recycled.                */
    LOCK();
    t = (GC_thread)COVERT_DATAFLOW(GC_lookup_by_pthread(thread));
      /* This is guaranteed to be the intended one, since the thread id */
      /* cannot have been recycled by pthreads.                         */
    UNLOCK();

    result = REAL_FUNC(pthread_join)(thread, retval);
#   if defined(GC_FREEBSD_THREADS)
      /* On FreeBSD, the wrapped pthread_join() sometimes returns       */
      /* (what appears to be) a spurious EINTR which caused the test    */
      /* and real code to fail gratuitously.  Having looked at system   */
      /* pthread library source code, I see how such a return code      */
      /* value may be generated.  In one path of the code, pthread_join */
      /* just returns the errno setting of the thread being joined -    */
      /* this does not match the POSIX specification or the local man   */
      /* pages.  Thus, I have taken the liberty to catch this one       */
      /* spurious return value.                                         */
      if (EXPECT(result == EINTR, FALSE)) result = 0;
#   endif

    if (EXPECT(0 == result, TRUE)) {
      LOCK();
      /* Here the pthread id may have been recycled.  Delete the thread */
      /* from GC_threads (unless it has been registered again from the  */
      /* client thread key destructor).                                 */
      if (KNOWN_FINISHED(t)) {
        GC_delete_thread(t);
      }
      UNLOCK();
    }

#   ifdef DEBUG_THREADS
      GC_log_printf("thread %p join with thread %p %s\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()),
                    (void *)GC_PTHREAD_PTRVAL(thread),
                    result != 0 ? "failed" : "succeeded");
#   endif
    return result;
  }

  GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread)
  {
    int result;
    GC_thread t;

    INIT_REAL_SYMS();
    LOCK();
    t = (GC_thread)COVERT_DATAFLOW(GC_lookup_by_pthread(thread));
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (EXPECT(0 == result, TRUE)) {
      LOCK();
      /* Here the pthread id may have been recycled.    */
      if (KNOWN_FINISHED(t)) {
        GC_delete_thread(t);
      } else {
        t -> flags |= DETACHED;
      }
      UNLOCK();
    }
    return result;
  }

  struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    sem_t registered;           /* 1 ==> in our thread table, but       */
                                /* parent hasn't yet noticed.           */
    unsigned char flags;
  };

  /* Called from GC_pthread_start_inner().  Defined in this file to     */
  /* minimize the number of include files in pthread_start.c (because   */
  /* sem_t and sem_post() are not used in that file directly).          */
  GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread(
                                        void *(**pstart)(void *),
                                        void **pstart_arg,
                                        struct GC_stack_base *sb, void *arg)
  {
    struct start_info *psi = (struct start_info *)arg;
    thread_id_t self_id = thread_id_self();
    GC_thread me;

#   ifdef DEBUG_THREADS
      GC_log_printf("Starting thread %p, sp= %p\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()), (void *)&arg);
#   endif
    /* If a GC occurs before the thread is registered, that GC will     */
    /* ignore this thread.  That's fine, since it will block trying to  */
    /* acquire the allocation lock, and won't yet hold interesting      */
    /* pointers.                                                        */
    LOCK();
    /* We register the thread here instead of in the parent, so that    */
    /* we don't need to hold the allocation lock during pthread_create. */
    me = GC_register_my_thread_inner(sb, self_id);
    GC_ASSERT(me != &first_thread);
    me -> flags = psi -> flags;
#   ifdef GC_WIN32_THREADS
      GC_win32_cache_self_pthread(self_id);
#   endif
#   ifdef THREAD_LOCAL_ALLOC
      GC_init_thread_local(&me->tlfs);
#   endif
    UNLOCK();

    *pstart = psi -> start_routine;
    *pstart_arg = psi -> arg;
#   ifdef DEBUG_THREADS
      GC_log_printf("start_routine= %p\n", (void *)(signed_word)(*pstart));
#   endif
    sem_post(&(psi -> registered));     /* Last action on *psi; */
                                        /* OK to deallocate.    */
    return me;
  }

  STATIC void * GC_pthread_start(void * arg)
  {
#   ifdef INCLUDE_LINUX_THREAD_DESCR
      struct GC_stack_base sb;

#     ifdef REDIRECT_MALLOC
        /* GC_get_stack_base may call pthread_getattr_np, which can     */
        /* unfortunately call realloc, which may allocate from an       */
        /* unregistered thread.  This is unpleasant, since it might     */
        /* force heap growth (or, even, heap overflow).                 */
        GC_disable();
#     endif
      if (GC_get_stack_base(&sb) != GC_SUCCESS)
        ABORT("Failed to get thread stack base");
#     ifdef REDIRECT_MALLOC
        GC_enable();
#     endif
      return GC_pthread_start_inner(&sb, arg);
#   else
      return GC_call_with_stack_base(GC_pthread_start_inner, arg);
#   endif
  }

  GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                       GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
                       void *(*start_routine)(void *), void *arg)
  {
    int result;
    struct start_info si;

    GC_ASSERT(I_DONT_HOLD_LOCK());
    INIT_REAL_SYMS();
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    GC_ASSERT(GC_thr_initialized);

    if (sem_init(&si.registered, GC_SEM_INIT_PSHARED, 0) != 0)
        ABORT("sem_init failed");
    si.flags = 0;
    si.start_routine = start_routine;
    si.arg = arg;

    /* We resist the temptation to muck with the stack size here,       */
    /* even if the default is unreasonably small.  That is the client's */
    /* responsibility.                                                  */
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size = 0;
        if (NULL != attr) {
          if (pthread_attr_getstacksize(attr, &stack_size) != 0)
            ABORT("pthread_attr_getstacksize failed");
        }
        if (0 == stack_size) {
          pthread_attr_t my_attr;

          if (pthread_attr_init(&my_attr) != 0)
            ABORT("pthread_attr_init failed");
          if (pthread_attr_getstacksize(&my_attr, &stack_size) != 0)
            ABORT("pthread_attr_getstacksize failed");
          (void)pthread_attr_destroy(&my_attr);
        }
        /* On Solaris 10 and on Win32 with winpthreads, with the        */
        /* default attr initialization, stack_size remains 0; fudge it. */
        if (EXPECT(0 == stack_size, FALSE)) {
#           if !defined(SOLARIS) && !defined(GC_WIN32_PTHREADS)
              WARN("Failed to get stack size for assertion checking\n", 0);
#           endif
            stack_size = 1000000;
        }
        GC_ASSERT(stack_size >= 65536);
        /* Our threads may need to do some work for the GC.     */
        /* Threads with ridiculously small stacks won't work,   */
        /* and they probably aren't worth supporting anyway.    */
      }
#   endif

    if (attr != NULL) {
        int detachstate;

        if (pthread_attr_getdetachstate(attr, &detachstate) != 0)
            ABORT("pthread_attr_getdetachstate failed");
        if (PTHREAD_CREATE_DETACHED == detachstate)
          si.flags |= DETACHED;
    }

#   ifdef PARALLEL_MARK
      if (EXPECT(!GC_parallel && GC_available_markers_m1 > 0, FALSE))
        GC_start_mark_threads();
#   endif
#   ifdef DEBUG_THREADS
      GC_log_printf("About to start new thread from thread %p\n",
                    (void *)GC_PTHREAD_PTRVAL(pthread_self()));
#   endif
    set_need_to_lock();
    result = REAL_FUNC(pthread_create)(new_thread, attr,
                                       GC_pthread_start, &si);

    /* Wait until child has been added to the thread table.             */
    /* This also ensures that we hold onto the stack-allocated si       */
    /* until the child is done with it.                                 */
    if (EXPECT(0 == result, TRUE)) {
        IF_CANCEL(int cancel_state;)

        DISABLE_CANCEL(cancel_state);
                /* pthread_create is not a cancellation point.  */
        while (0 != sem_wait(&si.registered)) {
#           if defined(GC_HAIKU_THREADS)
              /* To work around a bug in Haiku semaphores.      */
              if (EACCES == errno) continue;
#           endif
            if (EINTR != errno) ABORT("sem_wait failed");
        }
        RESTORE_CANCEL(cancel_state);
    }
    sem_destroy(&si.registered);
    return result;
  }

#endif /* GC_PTHREADS && !SN_TARGET_ORBIS && !SN_TARGET_PSP2 */

#if ((defined(GC_PTHREADS_PARAMARK) || defined(USE_PTHREAD_LOCKS)) \
     && !defined(NO_PTHREAD_TRYLOCK)) || defined(USE_SPIN_LOCK)
  /* Spend a few cycles in a way that can't introduce contention with   */
  /* other threads.                                                     */
# define GC_PAUSE_SPIN_CYCLES 10
  STATIC void GC_pause(void)
  {
    int i;

    for (i = 0; i < GC_PAUSE_SPIN_CYCLES; ++i) {
        /* Something that's unlikely to be optimized away. */
#     if defined(AO_HAVE_compiler_barrier) \
         && !defined(BASE_ATOMIC_OPS_EMULATED)
        AO_compiler_barrier();
#     else
        GC_noop1(i);
#     endif
    }
  }
#endif /* USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK */

#ifndef SPIN_MAX
# define SPIN_MAX 128   /* Maximum number of calls to GC_pause before   */
                        /* giving up.                                   */
#endif

#if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK) \
     && defined(USE_PTHREAD_LOCKS)) || defined(GC_PTHREADS_PARAMARK)
  /* If we do not want to use the below spinlock implementation, either */
  /* because we don't have a GC_test_and_set implementation, or because */
  /* we don't want to risk sleeping, we can still try spinning on       */
  /* pthread_mutex_trylock for a while.  This appears to be very        */
  /* beneficial in many cases.                                          */
  /* I suspect that under high contention this is nearly always better  */
  /* than the spin lock.  But it is a bit slower on a uniprocessor.     */
  /* Hence we still default to the spin lock.                           */
  /* This is also used to acquire the mark lock for the parallel        */
  /* marker.                                                            */

  /* Here we use a strict exponential backoff scheme.  I don't know     */
  /* whether that's better or worse than the above.  We eventually      */
  /* yield by calling pthread_mutex_lock(); it never makes sense to     */
  /* explicitly sleep.                                                  */

# ifdef LOCK_STATS
    /* Note that LOCK_STATS requires AO_HAVE_test_and_set.      */
    volatile AO_t GC_spin_count = 0;
    volatile AO_t GC_block_count = 0;
    volatile AO_t GC_unlocked_count = 0;
# endif

  STATIC void GC_generic_lock(pthread_mutex_t * lock)
  {
#   ifndef NO_PTHREAD_TRYLOCK
      unsigned pause_length = 1;
      unsigned i;

      if (EXPECT(0 == pthread_mutex_trylock(lock), TRUE)) {
#       ifdef LOCK_STATS
            (void)AO_fetch_and_add1(&GC_unlocked_count);
#       endif
        return;
      }
      for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch (pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    (void)AO_fetch_and_add1(&GC_spin_count);
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
      }
#   endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        (void)AO_fetch_and_add1(&GC_block_count);
#   endif
    pthread_mutex_lock(lock);
  }
#endif /* !USE_SPIN_LOCK || ... */

#if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
  GC_INNER volatile unsigned char GC_collecting = FALSE;
                        /* A hint that we are in the collector and      */
                        /* holding the allocation lock for an           */
                        /* extended period.                             */

# if defined(AO_HAVE_char_load) && !defined(BASE_ATOMIC_OPS_EMULATED)
#   define is_collecting() ((GC_bool)AO_char_load(&GC_collecting))
# else
    /* GC_collecting is a hint; a potential data race between   */
    /* GC_lock() and ENTER/EXIT_GC() is OK to ignore.           */
#   define is_collecting() ((GC_bool)GC_collecting)
# endif
#endif /* GC_PTHREADS && !GC_WIN32_THREADS */

#ifdef GC_ASSERTIONS
  GC_INNER unsigned long GC_lock_holder = NO_THREAD;
#endif

#if defined(USE_SPIN_LOCK)
  /* Reasonably fast spin locks.  Basically the same implementation     */
  /* as STL alloc.h.  This isn't really the right way to do this,       */
  /* but until the POSIX scheduling mess gets straightened out ...      */

  GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;

# define low_spin_max 30 /* spin cycles if we suspect uniprocessor  */
# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor   */

  static volatile AO_t spin_max = low_spin_max;
  static volatile AO_t last_spins = 0;
                                /* A potential data race between        */
                                /* threads invoking GC_lock which reads */
                                /* and updates spin_max and last_spins  */
                                /* could be ignored because these       */
                                /* variables are hints only.            */

  GC_INNER void GC_lock(void)
  {
    unsigned my_spin_max;
    unsigned my_last_spins;
    unsigned i;

    if (EXPECT(AO_test_and_set_acquire(&GC_allocate_lock)
                == AO_TS_CLEAR, TRUE)) {
        return;
    }
    my_spin_max = (unsigned)AO_load(&spin_max);
    my_last_spins = (unsigned)AO_load(&last_spins);
    for (i = 0; i < my_spin_max; i++) {
        if (is_collecting() || GC_nprocs == 1)
          goto yield;
        if (i < my_last_spins/2) {
            GC_pause();
            continue;
        }
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            /*
             * Got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            AO_store(&last_spins, (AO_t)i);
            AO_store(&spin_max, (AO_t)high_spin_max);
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    AO_store(&spin_max, (AO_t)low_spin_max);
  yield:
    for (i = 0;; ++i) {
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* Under Linux very short sleeps tend to wait until     */
                /* the current time quantum expires.  On old Linux      */
                /* kernels nanosleep (<= 2 ms) just spins.              */
                /* (Under 2.4, this happens only for real-time          */
                /* processes.)  We want to minimize both behaviors      */
                /* here.                                                */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                        /* Don't wait for more than about 15 ms,        */
                        /* even under extreme contention.               */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
  }

#elif defined(USE_PTHREAD_LOCKS)
  GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;

# ifndef NO_PTHREAD_TRYLOCK
    GC_INNER void GC_lock(void)
    {
      if (1 == GC_nprocs || is_collecting()) {
        pthread_mutex_lock(&GC_allocate_ml);
      } else {
        GC_generic_lock(&GC_allocate_ml);
      }
    }
# elif defined(GC_ASSERTIONS)
    GC_INNER void GC_lock(void)
    {
      pthread_mutex_lock(&GC_allocate_ml);
    }
# endif

#endif /* !USE_SPIN_LOCK && USE_PTHREAD_LOCKS */
2716

2717
#ifdef GC_PTHREADS_PARAMARK
2718

2719
# if defined(GC_ASSERTIONS) && defined(GC_WIN32_THREADS) \
2720
     && !defined(USE_PTHREAD_LOCKS)
2721
#   define NUMERIC_THREAD_ID(id) (unsigned long)(word)GC_PTHREAD_PTRVAL(id)
2722
    /* Id not guaranteed to be unique. */
2723
# endif
2724

2725
# ifdef GC_ASSERTIONS
2726
    STATIC unsigned long GC_mark_lock_holder = NO_THREAD;
2727
#   define SET_MARK_LOCK_HOLDER \
2728
                (void)(GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self()))
2729
#   define UNSET_MARK_LOCK_HOLDER \
2730
                do { \
2731
                  GC_ASSERT(GC_mark_lock_holder \
2732
                                == NUMERIC_THREAD_ID(pthread_self())); \
2733
                  GC_mark_lock_holder = NO_THREAD; \
2734
                } while (0)
2735
# else
2736
#   define SET_MARK_LOCK_HOLDER (void)0
2737
#   define UNSET_MARK_LOCK_HOLDER (void)0
2738
# endif /* !GC_ASSERTIONS */
2739

2740
  static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
2741

2742
# ifndef GC_WIN32_THREADS
2743
    static void setup_mark_lock(void)
32✔
2744
    {
2745
#     ifdef GLIBC_2_19_TSX_BUG
2746
        pthread_mutexattr_t mattr;
2747
        int glibc_minor = -1;
32✔
2748
        int glibc_major = GC_parse_version(&glibc_minor,
32✔
2749
                                           gnu_get_libc_version());
2750

2751
        if (glibc_major > 2 || (glibc_major == 2 && glibc_minor >= 19)) {
32✔
2752
          /* TODO: disable this workaround for glibc with fixed TSX */
2753
          /* This disables lock elision to workaround a bug in glibc 2.19+ */
2754
          if (0 != pthread_mutexattr_init(&mattr)) {
32✔
2755
            ABORT("pthread_mutexattr_init failed");
×
2756
          }
2757
          if (0 != pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_NORMAL)) {
32✔
2758
            ABORT("pthread_mutexattr_settype failed");
×
2759
          }
2760
          if (0 != pthread_mutex_init(&mark_mutex, &mattr)) {
32✔
2761
            ABORT("pthread_mutex_init failed");
×
2762
          }
2763
          (void)pthread_mutexattr_destroy(&mattr);
32✔
2764
        }
2765
#     endif
2766
    }
32✔
2767
# endif /* !GC_WIN32_THREADS */
2768

2769
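  /* Acquire mark_mutex and record the holder (in assertion builds).    */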
  GC_INNER void GC_acquire_mark_lock(void)
  {
#   if defined(NUMERIC_THREAD_ID_UNIQUE) && !defined(THREAD_SANITIZER)
      GC_ASSERT(GC_mark_lock_holder != NUMERIC_THREAD_ID(pthread_self()));
#   endif
    GC_generic_lock(&mark_mutex);
    SET_MARK_LOCK_HOLDER;
  }

  GC_INNER void GC_release_mark_lock(void)
  {
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
  }

  /* Collector must wait for free-list builders for 2 reasons:          */
  /* 1) Mark bits may still be getting examined without lock.           */
  /* 2) Partial free lists referenced only by locals may not be scanned */
  /*    correctly, e.g. if they contain "pointer-free" objects, since   */
  /*    the free-list link may be ignored.                              */
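  /* GC_wait_builder() atomically releases the mark lock while blocked  */
  /* on builder_cv and reacquires it before returning.                  */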
  STATIC void GC_wait_builder(void)
  {
    ASSERT_CANCEL_DISABLED();
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    SET_MARK_LOCK_HOLDER;
  }

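  /* Block until all free-list builders are done, i.e. until            */
  /* GC_fl_builder_count drops to zero.                                 */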
  GC_INNER void GC_wait_for_reclaim(void)
  {
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
  }

# if defined(CAN_HANDLE_FORK) && defined(THREAD_SANITIZER)
    /* Identical to GC_wait_for_reclaim() but with the no_sanitize      */
    /* attribute as a workaround for TSan which does not notice that    */
    /* the GC lock is acquired in fork_prepare_proc().                  */
    GC_ATTR_NO_SANITIZE_THREAD
    static void wait_for_reclaim_atfork(void)
    {
      GC_acquire_mark_lock();
      while (GC_fl_builder_count > 0)
        GC_wait_builder();
      GC_release_mark_lock();
    }
# endif /* CAN_HANDLE_FORK && THREAD_SANITIZER */

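  /* Wake every thread blocked in GC_wait_builder(); the caller must    */
  /* hold the mark lock (asserted below).                               */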
  GC_INNER void GC_notify_all_builder(void)
  {
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
  }

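  /* Block the calling marker thread on mark_cv until woken by          */
  /* GC_notify_all_marker(); the mark lock is released while waiting.   */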
  GC_INNER void GC_wait_marker(void)
  {
    ASSERT_CANCEL_DISABLED();
    GC_ASSERT(GC_parallel);
    UNSET_MARK_LOCK_HOLDER;
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    SET_MARK_LOCK_HOLDER;
  }

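  /* Wake every marker thread blocked in GC_wait_marker().              */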
  GC_INNER void GC_notify_all_marker(void)
  {
    GC_ASSERT(GC_parallel);
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
  }

#endif /* GC_PTHREADS_PARAMARK */

#ifdef PTHREAD_REGISTER_CANCEL_WEAK_STUBS
  /* Work around "undefined reference" linkage errors on some targets. */
  EXTERN_C_BEGIN
  extern void __pthread_register_cancel(void) __attribute__((__weak__));
  extern void __pthread_unregister_cancel(void) __attribute__((__weak__));
  EXTERN_C_END

  void __pthread_register_cancel(void) {}
  void __pthread_unregister_cancel(void) {}
#endif

#endif /* THREADS */