ivmai / bdwgc | build #1451 (push, travis-ci-com)
10 Mar 2023 05:47PM UTC | coverage: 76.182% (-0.5%) from 76.638%

ivmai: Change enable_threads default to off if threads are unsupported (CMake)

PR #535 (bdwgc).

When multi-threading support is absent for the target platform, as on
Emscripten or WASI, the CMake build should not fail by default (i.e.
threads support should not be on by default).  This matches the
corresponding behavior of the configure script.

* CMakeLists.txt (default_enable_threads): New variable (ON by
default).
* CMakeLists.txt: Call find_package(Threads) quietly regardless of
enable_threads.
* CMakeLists.txt [EMSCRIPTEN || WASI || !(CMAKE_USE_PTHREADS_INIT ||
CMAKE_USE_WIN32_THREADS_INIT)] (default_enable_threads): Set to OFF.
* CMakeLists.txt (enable_threads): Specify the default value to
default_enable_threads.
* CMakeLists.txt: Check CMAKE_USE_PTHREADS_INIT and
CMAKE_USE_WIN32_THREADS_INIT only if enable_threads.

Co-authored-by: Ivan Maidanski <ivmai@mail.ru>
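
For illustration, a minimal sketch of the CMake logic these entries describe (paraphrased from the ChangeLog above, not the exact upstream diff; see PR #535 for the real change):

  # Threads support defaults to ON only where a threads package is usable.
  set(default_enable_threads ON)
  # Probe quietly, regardless of enable_threads.
  find_package(Threads QUIET)
  if (EMSCRIPTEN OR WASI
      OR NOT (CMAKE_USE_PTHREADS_INIT OR CMAKE_USE_WIN32_THREADS_INIT))
    set(default_enable_threads OFF)
  endif()
  option(enable_threads "Support threads" ${default_enable_threads})
  if (enable_threads)
    # CMAKE_USE_PTHREADS_INIT and CMAKE_USE_WIN32_THREADS_INIT are checked
    # only here, so an unsupported platform no longer fails the
    # configuration step by default.
  endif()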

7,734 of 10,152 relevant lines covered (76.18%)

8,264,643.27 hits per line

Source file: /thread_local_alloc.c (85.88% covered)
/*
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#if defined(THREAD_LOCAL_ALLOC)

#if !defined(THREADS) && !defined(CPPCHECK)
# error Invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS
#endif

#include "private/thread_local_alloc.h"

#if defined(USE_COMPILER_TLS)
  __thread GC_ATTR_TLS_FAST
#elif defined(USE_WIN32_COMPILER_TLS)
  __declspec(thread) GC_ATTR_TLS_FAST
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Return a single nonempty freelist fl to the global one pointed to    */
/* by gfl.                                                              */

static void return_single_freelist(void *fl, void **gfl)
{
    if (*gfl == 0) {
      *gfl = fl;
    } else {
      void *q, **qptr;

      GC_ASSERT(GC_size(fl) == GC_size(*gfl));
      /* Concatenate: */
        qptr = &(obj_link(fl));
        while ((word)(q = *qptr) >= HBLKSIZE)
          qptr = &(obj_link(q));
        GC_ASSERT(0 == q);
        *qptr = *gfl;
        *gfl = fl;
    }
}

/* Recover the contents of the freelist array fl into the global one gfl. */
static void return_freelists(void **fl, void **gfl)
{
    int i;

    for (i = 1; i < TINY_FREELISTS; ++i) {
        if ((word)(fl[i]) >= HBLKSIZE) {
          return_single_freelist(fl[i], &gfl[i]);
        }
        /* Clear fl[i], since the thread structure may hang around.     */
        /* Do it in a way that is likely to trap if we access it.       */
        fl[i] = (ptr_t)HBLKSIZE;
    }
    /* The 0 granule freelist really contains 1 granule objects.        */
    if ((word)fl[0] >= HBLKSIZE
#       ifdef GC_GCJ_SUPPORT
          && fl[0] != ERROR_FL
#       endif
       ) {
        return_single_freelist(fl[0], &gfl[1]);
    }
}

#ifdef USE_PTHREAD_SPECIFIC
  /* Re-set the TLS value on thread cleanup to allow thread-local       */
  /* allocations to happen in the TLS destructors.                      */
  /* GC_unregister_my_thread (and similar routines) will finally set    */
  /* the GC_thread_key to NULL preventing this destructor from being    */
  /* called repeatedly.                                                 */
  static void reset_thread_key(void* v) {
    pthread_setspecific(GC_thread_key, v);
  }
#else
# define reset_thread_key 0
#endif

/* Each thread structure must be initialized.   */
/* This call must be made from the new thread.  */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i, j, res;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
#       ifdef USE_CUSTOM_SPECIFIC
          /* Ensure proper alignment of a "pushed" GC symbol.   */
          GC_ASSERT((word)(&GC_thread_key) % sizeof(word) == 0);
#       endif
        res = GC_key_create(&GC_thread_key, reset_thread_key);
        if (COVERT_DATAFLOW(res) != 0) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    res = GC_setspecific(GC_thread_key, p);
    if (COVERT_DATAFLOW(res) != 0) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (j = 0; j < TINY_FREELISTS; ++j) {
        for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
            p -> _freelists[i][j] = (void *)(word)1;
        }
#       ifdef GC_GCJ_SUPPORT
            p -> gcj_freelists[j] = (void *)(word)1;
#       endif
    }
    /* The size 0 free lists are handled like the regular free lists,   */
    /* to ensure that the explicit deallocation works.  However,        */
    /* allocation of a size 0 "gcj" object is always an error.          */
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = ERROR_FL;
#   endif
}

GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    int k;

    GC_ASSERT(I_HOLD_LOCK());
    /* We currently only do this from the thread itself.        */
    GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
    for (k = 0; k < THREAD_FREELISTS_KINDS; ++k) {
        if (k == (int)GC_n_kinds)
            break; /* kind is not created */
        return_freelists(p -> _freelists[k], GC_obj_kinds[k].ok_freelist);
    }
#   ifdef GC_GCJ_SUPPORT
        return_freelists(p -> gcj_freelists, (void **)GC_gcjobjfreelist);
#   endif
}

STATIC void *GC_get_tlfs(void)
{
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
    GC_key_t k = GC_thread_key;

    if (EXPECT(0 == k, FALSE)) {
      /* We have not yet run GC_init_parallel.  That means we also  */
      /* are not locking, so GC_malloc_kind_global is fairly cheap. */
      return NULL;
    }
    return GC_getspecific(k);
# else
    if (EXPECT(!keys_initialized, FALSE)) return NULL;

    return GC_getspecific(GC_thread_key);
# endif
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int kind)
{
    size_t granules;
    void *tsd;
    void *result;

#   if MAXOBJKINDS > THREAD_FREELISTS_KINDS
      if (EXPECT(kind >= THREAD_FREELISTS_KINDS, FALSE)) {
        return GC_malloc_kind_global(bytes, kind);
      }
#   endif
    tsd = GC_get_tlfs();
    if (EXPECT(NULL == tsd, FALSE)) {
        return GC_malloc_kind_global(bytes, kind);
    }
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    granules = ROUNDED_UP_GRANULES(bytes);
#   if defined(CPPCHECK)
#     define MALLOC_KIND_PTRFREE_INIT (void*)1
#   else
#     define MALLOC_KIND_PTRFREE_INIT NULL
#   endif
    GC_FAST_MALLOC_GRANS(result, granules,
                         ((GC_tlfs)tsd) -> _freelists[kind], DIRECT_GRANULES,
                         kind, GC_malloc_kind_global(bytes, kind),
                         (void)(kind == PTRFREE ? MALLOC_KIND_PTRFREE_INIT
                                               : (obj_link(result) = 0)));
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, kind, result,
                    (unsigned long)GC_gc_no);
#   endif
    return result;
}

#ifdef GC_GCJ_SUPPORT

# include "gc/gc_gcj.h"

/* Gcj-style allocation without locks is extremely tricky.  The         */
/* fundamental issue is that we may end up marking a free list, which   */
/* has freelist links instead of "vtable" pointers.  That is usually    */
/* OK, since the next object on the free list will be cleared, and      */
/* will thus be interpreted as containing a zero descriptor.  That's    */
/* fine if the object has not yet been initialized.  But there are      */
/* interesting potential races.                                         */
/* In the case of incremental collection, this seems hopeless, since    */
/* the marker may run asynchronously, and may pick up the pointer to    */
/* the next freelist entry (which it thinks is a vtable pointer), get   */
/* suspended for a while, and then see an allocated object instead      */
/* of the vtable.  This may be avoidable with either a handshake with   */
/* the collector or, probably more easily, by moving the free list      */
/* links to the second word of each object.  The latter isn't a         */
/* universal win, since on architectures like Itanium, nonzero offsets  */
/* are not necessarily free.  And there may be cache fill order issues. */
/* For now, we punt with incremental GC.  This probably means that      */
/* incremental GC should be enabled before we fork a second thread.     */
/* Unlike the other thread local allocation calls, we assume that the   */
/* collector has been explicitly initialized.                           */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc(size_t bytes,
                                    void * ptr_to_struct_containing_descr)
{
  if (EXPECT(GC_incremental, FALSE)) {
    return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr);
  } else {
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *result;
    void **tiny_fl;

    GC_ASSERT(GC_gcjobjfreelist != NULL);
    tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         GC_gcj_kind,
                         GC_core_gcj_malloc(bytes,
                                            ptr_to_struct_containing_descr),
                         {AO_compiler_barrier();
                          *(void **)result = ptr_to_struct_containing_descr;});
        /* This forces the initialization of the "method ptr".          */
        /* This is necessary to ensure some very subtle properties      */
        /* required if a GC is run in the middle of such an allocation. */
        /* Here we implicitly also assume atomicity for the free list   */
        /* and method pointer assignments.                              */
        /* We must update the freelist before we store the pointer.     */
        /* Otherwise a GC at this point would see a corrupted           */
        /* free list.                                                   */
        /* A real memory barrier is not needed, since the               */
        /* action of stopping this thread will cause prior writes       */
        /* to complete.                                                 */
        /* We assert that any concurrent marker will stop us.           */
        /* Thus it is impossible for a mark procedure to see the        */
        /* allocation of the next object, but to see this object        */
        /* still containing a free list pointer.  Otherwise the         */
        /* marker, by misinterpreting the freelist link as a vtable     */
        /* pointer, might find a random "mark descriptor" in the next   */
        /* object.                                                      */
    return result;
  }
}

#endif /* GC_GCJ_SUPPORT */

/* The thread support layer must arrange to mark thread-local   */
/* free lists explicitly, since the link field is often         */
/* invisible to the marker.  It knows how to find all threads;  */
/* we take care of an individual thread freelist structure.     */
GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p)
{
    ptr_t q;
    int i, j;

    for (j = 0; j < TINY_FREELISTS; ++j) {
      for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
        /* Load the pointer atomically as it might be updated   */
        /* concurrently by GC_FAST_MALLOC_GRANS.                */
        q = (ptr_t)AO_load((volatile AO_t *)&p->_freelists[i][j]);
        if ((word)q > HBLKSIZE)
          GC_set_fl_marks(q);
      }
#     ifdef GC_GCJ_SUPPORT
        if (EXPECT(j > 0, TRUE)) {
          q = (ptr_t)AO_load((volatile AO_t *)&p->gcj_freelists[j]);
          if ((word)q > HBLKSIZE)
            GC_set_fl_marks(q);
        }
#     endif
    }
}

#if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists in p are completely marked. */
    void GC_check_tls_for(GC_tlfs p)
    {
        int i, j;

        for (j = 1; j < TINY_FREELISTS; ++j) {
          for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
            GC_check_fl_marks(&p->_freelists[i][j]);
          }
#         ifdef GC_GCJ_SUPPORT
            GC_check_fl_marks(&p->gcj_freelists[j]);
#         endif
        }
    }
#endif /* GC_ASSERTIONS */

#endif /* THREAD_LOCAL_ALLOC */
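
As a usage note, the thread-local fast path above is reached through the public GC_malloc_kind API. A minimal sketch (illustrative only, not part of the file; assumes bdwgc built with GC_THREADS and thread-local allocation enabled):

  #define GC_THREADS
  #include "gc.h"

  #include <stdio.h>

  int main(void)
  {
      GC_INIT();
      /* Pointer-free object: the collector does not scan its contents, */
      /* so it comes from the PTRFREE thread-local free lists.          */
      void *buf = GC_malloc_kind(64, GC_I_PTRFREE);
      /* Normal object: may contain pointers to other GC-managed objects. */
      void *node = GC_malloc_kind(4 * sizeof(void *), GC_I_NORMAL);

      printf("allocated %p and %p\n", buf, node);
      return 0;
  }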