ivmai / bdwgc · build 1466 (push, via travis-ci-com, committed by ivmai)

24 Mar 2023 09:19PM UTC coverage: 76.804% (+0.6%) from 76.199%

Do not add extra byte to non-small uncollectible objects

Previously, the EXTRA_BYTES value was omitted from the allocation size only
for small uncollectible objects; with this change, non-small uncollectible
objects skip the extra byte as well (see the sketch after the change list
below).

* include/gc/gc.h (GC_all_interior_pointers): Refine comment (regarding
uncollectible objects).
* malloc.c (GC_generic_malloc_uncollectable): Define lb_orig local
variable; decrement lb before if(SMALL_OBJ(lb)); do not expect original
lb is zero; pass lb_orig (instead of lb) to GC_DBG_COLLECT_AT_MALLOC()
and to oom_fn().
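
To make the fix concrete, here is a minimal, self-contained sketch of how the
size adjustment moves in front of the small-object test. This is not the
actual malloc.c code: the EXTRA_BYTES value of 1, the MAXOBJBYTES threshold,
and the function names here are illustrative stand-ins.

#include <stdio.h>

/* Hypothetical stand-ins for the collector's private macros; the real  */
/* values live in the GC's internal headers.                            */
#define EXTRA_BYTES 1        /* pad added by the normal rounding path   */
#define MAXOBJBYTES 2048     /* assumed small-object threshold          */
#define SMALL_OBJ(lb) ((lb) <= MAXOBJBYTES)

/* Both allocation paths round the request up and add EXTRA_BYTES.      */
static size_t rounded_request(size_t lb) { return lb + EXTRA_BYTES; }

/* Old shape: the cancelling decrement sat inside the small-object      */
/* branch, so a non-small uncollectible object still grew by one byte.  */
static size_t old_uncollectable_size(size_t lb)
{
    if (SMALL_OBJ(lb)) {
        if (lb != 0) lb--;          /* pad cancelled here only */
        return rounded_request(lb);
    }
    return rounded_request(lb);     /* pad NOT cancelled */
}

/* New shape: lb_orig is saved for GC_DBG_COLLECT_AT_MALLOC() and        */
/* oom_fn(), and lb is decremented before SMALL_OBJ(lb) is tested, so    */
/* neither path adds the extra byte; a zero lb is handled explicitly.    */
static size_t new_uncollectable_size(size_t lb, size_t *lb_orig)
{
    *lb_orig = lb;
    if (lb != 0) lb--;              /* pad cancelled for all sizes */
    (void)SMALL_OBJ(lb);            /* branch choice itself is unchanged */
    return rounded_request(lb);
}

int main(void)
{
    size_t orig;

    printf("old, 4096-byte request -> %zu\n",
           old_uncollectable_size(4096));           /* prints 4097 */
    printf("new, 4096-byte request -> %zu\n",
           new_uncollectable_size(4096, &orig));    /* prints 4096 */
    return 0;
}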

3 of 3 new or added lines in 1 file covered. (100.0%)

7771 of 10118 relevant lines covered (76.8%)

8832882.01 hits per line

Source file: /thread_local_alloc.c (85.88% of relevant lines covered)

/*
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#if defined(THREAD_LOCAL_ALLOC)

#if !defined(THREADS) && !defined(CPPCHECK)
# error Invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS
#endif

#include "private/thread_local_alloc.h"

#if defined(USE_COMPILER_TLS)
  __thread GC_ATTR_TLS_FAST
#elif defined(USE_WIN32_COMPILER_TLS)
  __declspec(thread) GC_ATTR_TLS_FAST
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Return a single nonempty freelist fl to the global one pointed to    */
/* by gfl.                                                              */

static void return_single_freelist(void *fl, void **gfl)
{
    if (*gfl == 0) {
      *gfl = fl;
    } else {
      void *q, **qptr;

      GC_ASSERT(GC_size(fl) == GC_size(*gfl));
      /* Concatenate: */
        qptr = &(obj_link(fl));
        while ((word)(q = *qptr) >= HBLKSIZE)
          qptr = &(obj_link(q));
        GC_ASSERT(0 == q);
        *qptr = *gfl;
        *gfl = fl;
    }
}

/* Recover the contents of the freelist array fl into the global one gfl. */
static void return_freelists(void **fl, void **gfl)
{
    int i;

    for (i = 1; i < TINY_FREELISTS; ++i) {
        if ((word)(fl[i]) >= HBLKSIZE) {
          return_single_freelist(fl[i], &gfl[i]);
        }
        /* Clear fl[i], since the thread structure may hang around.     */
        /* Do it in a way that is likely to trap if we access it.       */
        fl[i] = (ptr_t)HBLKSIZE;
    }
    /* The 0 granule freelist really contains 1 granule objects.        */
    if ((word)fl[0] >= HBLKSIZE
#       ifdef GC_GCJ_SUPPORT
          && fl[0] != ERROR_FL
#       endif
       ) {
        return_single_freelist(fl[0], &gfl[1]);
    }
}

#ifdef USE_PTHREAD_SPECIFIC
  /* Re-set the TLS value on thread cleanup to allow thread-local       */
  /* allocations to happen in the TLS destructors.                      */
  /* GC_unregister_my_thread (and similar routines) will finally set    */
  /* the GC_thread_key to NULL preventing this destructor from being    */
  /* called repeatedly.                                                 */
  static void reset_thread_key(void* v) {
    pthread_setspecific(GC_thread_key, v);
  }
#else
# define reset_thread_key 0
#endif

/* Each thread structure must be initialized.   */
/* This call must be made from the new thread.  */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i, j, res;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
#       ifdef USE_CUSTOM_SPECIFIC
          /* Ensure proper alignment of a "pushed" GC symbol.   */
          GC_ASSERT((word)(&GC_thread_key) % sizeof(word) == 0);
#       endif
        res = GC_key_create(&GC_thread_key, reset_thread_key);
        if (COVERT_DATAFLOW(res) != 0) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    res = GC_setspecific(GC_thread_key, p);
    if (COVERT_DATAFLOW(res) != 0) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (j = 0; j < TINY_FREELISTS; ++j) {
        for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
            p -> _freelists[i][j] = (void *)(word)1;
        }
#       ifdef GC_GCJ_SUPPORT
            p -> gcj_freelists[j] = (void *)(word)1;
#       endif
    }
    /* The size 0 free lists are handled like the regular free lists,   */
    /* to ensure that the explicit deallocation works.  However,        */
    /* allocation of a size 0 "gcj" object is always an error.          */
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = ERROR_FL;
#   endif
}

GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    int k;

    GC_ASSERT(I_HOLD_LOCK());
    /* We currently only do this from the thread itself.        */
    GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
    for (k = 0; k < THREAD_FREELISTS_KINDS; ++k) {
        if (k == (int)GC_n_kinds)
            break; /* kind is not created */
        return_freelists(p -> _freelists[k], GC_obj_kinds[k].ok_freelist);
    }
#   ifdef GC_GCJ_SUPPORT
        return_freelists(p -> gcj_freelists, (void **)GC_gcjobjfreelist);
#   endif
}

STATIC void *GC_get_tlfs(void)
{
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
    GC_key_t k = GC_thread_key;

    if (EXPECT(0 == k, FALSE)) {
      /* We have not yet run GC_init_parallel.  That means we also  */
      /* are not locking, so GC_malloc_kind_global is fairly cheap. */
      return NULL;
    }
    return GC_getspecific(k);
# else
    if (EXPECT(!keys_initialized, FALSE)) return NULL;

    return GC_getspecific(GC_thread_key);
# endif
}

GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int kind)
{
    size_t granules;
    void *tsd;
    void *result;

#   if MAXOBJKINDS > THREAD_FREELISTS_KINDS
      if (EXPECT(kind >= THREAD_FREELISTS_KINDS, FALSE)) {
        return GC_malloc_kind_global(bytes, kind);
      }
#   endif
    tsd = GC_get_tlfs();
    if (EXPECT(NULL == tsd, FALSE)) {
        return GC_malloc_kind_global(bytes, kind);
    }
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    granules = ALLOC_REQUEST_GRANS(bytes);
#   if defined(CPPCHECK)
#     define MALLOC_KIND_PTRFREE_INIT (void*)1
#   else
#     define MALLOC_KIND_PTRFREE_INIT NULL
#   endif
    GC_FAST_MALLOC_GRANS(result, granules,
                         ((GC_tlfs)tsd) -> _freelists[kind], DIRECT_GRANULES,
                         kind, GC_malloc_kind_global(bytes, kind),
                         (void)(kind == PTRFREE ? MALLOC_KIND_PTRFREE_INIT
                                               : (obj_link(result) = 0)));
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, kind, result,
                    (unsigned long)GC_gc_no);
#   endif
    return result;
}

#ifdef GC_GCJ_SUPPORT

# include "gc/gc_gcj.h"

/* Gcj-style allocation without locks is extremely tricky.  The         */
/* fundamental issue is that we may end up marking a free list, which   */
/* has freelist links instead of "vtable" pointers.  That is usually    */
/* OK, since the next object on the free list will be cleared, and      */
/* will thus be interpreted as containing a zero descriptor.  That's    */
/* fine if the object has not yet been initialized.  But there are      */
/* interesting potential races.                                         */
/* In the case of incremental collection, this seems hopeless, since    */
/* the marker may run asynchronously, and may pick up the pointer to    */
/* the next freelist entry (which it thinks is a vtable pointer), get   */
/* suspended for a while, and then see an allocated object instead      */
/* of the vtable.  This may be avoidable with either a handshake with   */
/* the collector or, probably more easily, by moving the free list      */
/* links to the second word of each object.  The latter isn't a         */
/* universal win, since on architectures like Itanium, nonzero offsets  */
/* are not necessarily free.  And there may be cache fill order issues. */
/* For now, we punt with incremental GC.  This probably means that      */
/* incremental GC should be enabled before we fork a second thread.     */
/* Unlike the other thread local allocation calls, we assume that the   */
/* collector has been explicitly initialized.                           */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc(size_t bytes,
                                    void * ptr_to_struct_containing_descr)
{
  if (EXPECT(GC_incremental, FALSE)) {
    return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr, 0);
  } else {
    size_t granules = ALLOC_REQUEST_GRANS(bytes);
    void *result;
    void **tiny_fl;

    GC_ASSERT(GC_gcjobjfreelist != NULL);
    tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         GC_gcj_kind,
                         GC_core_gcj_malloc(bytes,
                                            ptr_to_struct_containing_descr,
                                            0 /* flags */),
                         {AO_compiler_barrier();
                          *(void **)result = ptr_to_struct_containing_descr;});
        /* This forces the initialization of the "method ptr".          */
        /* This is necessary to ensure some very subtle properties      */
        /* required if a GC is run in the middle of such an allocation. */
        /* Here we implicitly also assume atomicity for the free list   */
        /* and method pointer assignments.                              */
        /* We must update the freelist before we store the pointer.     */
        /* Otherwise a GC at this point would see a corrupted           */
        /* free list.                                                   */
        /* A real memory barrier is not needed, since the               */
        /* action of stopping this thread will cause prior writes       */
        /* to complete.                                                 */
        /* We assert that any concurrent marker will stop us.           */
        /* Thus it is impossible for a mark procedure to see the        */
        /* allocation of the next object, but to see this object        */
        /* still containing a free list pointer.  Otherwise the         */
        /* marker, by misinterpreting the freelist link as a vtable     */
        /* pointer, might find a random "mark descriptor" in the next   */
        /* object.                                                      */
    return result;
  }
}

#endif /* GC_GCJ_SUPPORT */

/* The thread support layer must arrange to mark thread-local   */
/* free lists explicitly, since the link field is often         */
/* invisible to the marker.  It knows how to find all threads;  */
/* we take care of an individual thread freelist structure.     */
GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p)
{
    ptr_t q;
    int i, j;

    for (j = 0; j < TINY_FREELISTS; ++j) {
      for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
        /* Load the pointer atomically as it might be updated   */
        /* concurrently by GC_FAST_MALLOC_GRANS.                */
        q = (ptr_t)AO_load((volatile AO_t *)&p->_freelists[i][j]);
        if ((word)q > HBLKSIZE)
          GC_set_fl_marks(q);
      }
#     ifdef GC_GCJ_SUPPORT
        if (EXPECT(j > 0, TRUE)) {
          q = (ptr_t)AO_load((volatile AO_t *)&p->gcj_freelists[j]);
          if ((word)q > HBLKSIZE)
            GC_set_fl_marks(q);
        }
#     endif
    }
}

#if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists in p are completely marked. */
    void GC_check_tls_for(GC_tlfs p)
    {
        int i, j;

        for (j = 1; j < TINY_FREELISTS; ++j) {
          for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
            GC_check_fl_marks(&p->_freelists[i][j]);
          }
#         ifdef GC_GCJ_SUPPORT
            GC_check_fl_marks(&p->gcj_freelists[j]);
#         endif
        }
    }
#endif /* GC_ASSERTIONS */

#endif /* THREAD_LOCAL_ALLOC */
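
For context, here is a minimal usage sketch of the thread-local allocation
path this file implements. It assumes the collector is built with GC_THREADS
on a pthreads platform, where gc.h redirects pthread_create so each worker is
registered with the GC (and so gets its own free lists), and GC_MALLOC
ultimately lands in GC_malloc_kind above.

/* build (assumed): cc -DGC_THREADS demo.c -lgc -lpthread */
#define GC_THREADS
#include "gc.h"

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    int i;

    /* Each small allocation is served from this thread's local free    */
    /* lists when possible, falling back to GC_malloc_kind_global.      */
    for (i = 0; i < 100000; ++i) {
        void *p = GC_MALLOC(16);
        if (NULL == p) break;
    }
    return arg;
}

int main(void)
{
    pthread_t t[4];
    int i;

    GC_INIT();
    for (i = 0; i < 4; ++i)
        pthread_create(&t[i], NULL, worker, NULL); /* redirected by gc.h */
    for (i = 0; i < 4; ++i)
        pthread_join(t[i], NULL);
    printf("heap size: %lu\n", (unsigned long)GC_get_heap_size());
    return 0;
}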