• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ivmai / libatomic_ops / 596

23 May 2023 05:55AM UTC coverage: 96.426% (-0.1%) from 96.557%
596

push

travis-ci-com

ivmai
DRAFT 204 - test all on dist jammy

2725 of 2826 relevant lines covered (96.43%)

986507.48 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.36
/src/atomic_ops_stack.c
1
/*
2
 * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
3
 *
4
 * This program is free software; you can redistribute it and/or modify
5
 * it under the terms of the GNU General Public License as published by
6
 * the Free Software Foundation; either version 2 of the License, or
7
 * (at your option) any later version.
8
 *
9
 * This program is distributed in the hope that it will be useful,
10
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
 * GNU General Public License for more details.
13
 *
14
 * You should have received a copy of the GNU General Public License along
15
 * with this program; if not, write to the Free Software Foundation, Inc.,
16
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17
 */
18

19
#if defined(HAVE_CONFIG_H)
20
# include "config.h"
21
#endif
22

23
#include <string.h>
24
#include <stdlib.h>
25
#include <assert.h>
26

27
#ifndef AO_BUILD
28
# define AO_BUILD
29
#endif
30

31
#ifndef AO_REAL_PTR_AS_MACRO
32
# define AO_REAL_PTR_AS_MACRO
33
#endif
34

35
#define AO_REQUIRE_CAS
36
#include "atomic_ops_stack.h"
37

38
/* Set up *list as an empty stack: the head pointer (and, in the        */
/* almost-lock-free configuration, the auxiliary black list) are all    */
/* cleared to zero.  Must be called before any push/pop on the stack.   */
AO_API void AO_stack_init(AO_stack_t *list)
{
  memset(list, 0, sizeof(*list));
}
42

43
/* Report whether this stack implementation is truly lock-free.         */
/* The "almost lock-free" configuration may wait (see AO_pause usage    */
/* in the pop path) and therefore reports 0; otherwise reports 1.       */
AO_API int AO_stack_is_lock_free(void)
{
# ifdef AO_USE_ALMOST_LOCK_FREE
    const int result = 0;
# else
    const int result = 1;
# endif
  return result;
}
51

52
/* Return the address of the (possibly tagged) head field of the        */
/* stack, with any implementation-specific low-order bits stripped by   */
/* the AO_REAL_HEAD_PTR macro.                                          */
AO_API AO_t *AO_stack_head_ptr(const AO_stack_t *list)
{
  AO_t *head = AO_REAL_HEAD_PTR(*list);

  return head;
}
56

57
/* Convert a raw link value (which may carry extra low-order tag bits   */
/* in the almost-lock-free configuration) into a usable pointer to the  */
/* next element, via the AO_REAL_NEXT_PTR macro.                        */
AO_API AO_t *AO_stack_next_ptr(AO_t next)
{
  AO_t *next_elem = AO_REAL_NEXT_PTR(next);

  return next_elem;
}
61

62
/* This function call must be a part of a do-while loop with a CAS      */
/* designating the condition of the loop (see the use cases below).     */
/* The plain store deliberately races with concurrent operations on     */
/* the same location: if another thread changes the list head, the      */
/* subsequent CAS fails and the loop retries with a fresh value.        */
#ifdef AO_THREAD_SANITIZER
  /* Under TSan, use a real function so the benign race can be  */
  /* suppressed with the no-sanitize-thread attribute (macros   */
  /* cannot carry attributes).                                  */
  AO_ATTR_NO_SANITIZE_THREAD
  static void store_before_cas(AO_t *addr, AO_t value)
  {
    *addr = value;
  }
#else
  /* Otherwise a macro suffices; the (void) cast discards the   */
  /* assignment result to match the function form's usage.      */
# define store_before_cas(addr, value) (void)(*(addr) = (value))
#endif
73

74
#ifdef AO_USE_ALMOST_LOCK_FREE
75

76
# ifdef __cplusplus
77
    extern "C" {
78
# endif
79
  AO_API void AO_pause(int); /* defined in atomic_ops.c */
80
# ifdef __cplusplus
81
    } /* extern "C" */
82
# endif
83

84
# if defined(__alpha__) && (__GNUC__ == 4)
85
    /* Workaround __builtin_expect bug found in         */
86
    /* gcc-4.6.3/alpha causing test_stack failure.      */
87
#   undef AO_EXPECT_FALSE
88
#   define AO_EXPECT_FALSE(expr) (expr)
89
# endif
90

91
  /* LIFO linked lists based on compare-and-swap.  We need to avoid     */
92
  /* the case of a node deletion and reinsertion while I'm deleting     */
93
  /* it, since that may cause my CAS to succeed eventhough the next     */
94
  /* pointer is now wrong.  Our solution is not fully lock-free, but it */
95
  /* is good enough for signal handlers, provided we have a suitably    */
96
  /* low bound on the number of recursive signal handler reentries.     */
97
  /* A list consists of a first pointer and a blacklist of pointer      */
98
  /* values that are currently being removed.  No list element on       */
99
  /* the blacklist may be inserted.  If we would otherwise do so, we    */
100
  /* are allowed to insert a variant that differs only in the least     */
101
  /* significant, ignored, bits.  If the list is full, we wait.         */
102

103
  /* Crucial observation: A particular padded pointer x (i.e. pointer   */
104
  /* plus arbitrary low order bits) can never be newly inserted into    */
105
  /* a list while it's in the corresponding auxiliary data structure.   */
106

107
  /* The second argument is a pointer to the link field of the element  */
108
  /* to be inserted.                                                    */
109
  /* Both list headers and link fields contain "perturbed" pointers,    */
110
  /* i.e. pointers with extra bits or'ed into the low order bits.       */
111
  /* Push the element whose link field is at x onto *list, using the    */
  /* auxiliary black list in a to avoid colliding with in-progress      */
  /* removals.  If the (padded) pointer value of x is currently on the  */
  /* black list, a variant differing only in the low-order (ignored)    */
  /* tag bits is inserted instead.  Release ordering on the final CAS   */
  /* publishes the element's contents to subsequent poppers.            */
  AO_API void AO_stack_push_explicit_aux_release(volatile AO_t *list, AO_t *x,
                                                 AO_stack_aux *a)
  {
    AO_t x_bits = (AO_t)x;  /* pointer plus low-order version tag bits */
    AO_t next;

    /* No deletions of x can start here, since x is not         */
    /* currently in the list.                                   */
  retry:
    do {
      next = AO_load_acquire(list);
      store_before_cas(x, next);
      {
#     if AO_BL_SIZE == 2
        /* Start all loads as close to concurrently as possible.        */
        AO_t entry1 = AO_load(&a->AO_stack_bl[0]);
        AO_t entry2 = AO_load(&a->AO_stack_bl[1]);
        if (AO_EXPECT_FALSE(entry1 == x_bits || entry2 == x_bits))
#     else
        int i;
        for (i = 0; i < AO_BL_SIZE; ++i)
          if (AO_EXPECT_FALSE(AO_load(&a->AO_stack_bl[i]) == x_bits))
#     endif
        {
          /* Entry is currently being removed.  Change it a little.     */
          ++x_bits;
          if ((x_bits & AO_BIT_MASK) == 0)
            /* Version count overflowed; EXTREMELY unlikely, but possible. */
            x_bits = (AO_t)x;
          goto retry;
        }
      }

      /* x_bits value is not currently being deleted.   */
    } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next,
                                                          x_bits)));
  }
148

149
  /* I concluded experimentally that checking a value first before      */
  /* performing a compare-and-swap is usually beneficial on x86, but    */
  /* slows things down appreciably with contention on Itanium.          */
  /* Since the Itanium behavior makes more sense to me (more cache line */
  /* movement unless we're mostly reading, but back-off should guard    */
  /* against that), we take Itanium as the default.  Measurements on    */
  /* other multiprocessor architectures would be useful.                */
  /* (On a uniprocessor, the initial check is almost certainly a very   */
  /* small loss.) - HB                                                  */
  /* NOTE: PRECHECK is spliced textually into an if-condition as        */
  /* "if (PRECHECK(e) CAS(...))" (see AO_stack_pop_explicit_aux_acquire */
  /* below) -- hence the trailing "&&" in the x86 variant and the       */
  /* empty expansion in the default one.                                */
# ifdef __i386__
#   define PRECHECK(a) (a) == 0 &&
# else
#   define PRECHECK(a)
# endif
163

164
  /* This function is used before CAS in the below AO_stack_pop and the */
  /* data race (reported by TSan) is OK because it results in a retry.  */
# ifdef AO_THREAD_SANITIZER
    AO_ATTR_NO_SANITIZE_THREAD
    static AO_t AO_load_next(const volatile AO_t *first_ptr)
    {
      /* Assuming an architecture on which loads of word type are       */
      /* atomic.  AO_load cannot be used here because it cannot be      */
      /* instructed to suppress the warning about the race.             */
      return *first_ptr;
    }
# else
    /* Without TSan a plain AO_load already provides the needed         */
    /* word-sized atomic read.                                          */
#   define AO_load_next AO_load
# endif
178

179
  /* Pop the first element off *list, using the auxiliary structure a   */
  /* as the black list of entries whose removal is in progress.         */
  /* Returns a pointer to the link field of the removed element, or 0   */
  /* if the list is empty.  May wait (via AO_pause with an increasing   */
  /* round counter) if more than AO_BL_SIZE removals on a are already   */
  /* in progress, so this path is only "almost" lock-free.              */
  AO_API AO_t *AO_stack_pop_explicit_aux_acquire(volatile AO_t *list,
                                                 AO_stack_aux *a)
  {
    unsigned i;                 /* index of our black-list slot */
    int j = 0;                  /* back-off round count for AO_pause */
    AO_t first;
    AO_t * first_ptr;
    AO_t next;

  retry:
    first = AO_load(list);
    if (0 == first) return 0;
    /* Insert first into aux black list.                                */
    /* This may spin if more than AO_BL_SIZE removals using auxiliary   */
    /* structure a are currently in progress.                           */
    for (i = 0; ; )
      {
        if (PRECHECK(a -> AO_stack_bl[i])
            AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first))
          break;
        if (++i >= AO_BL_SIZE)
          {
            i = 0;
            AO_pause(++j);
          }
      }
#   ifndef AO_THREAD_SANITIZER
      assert(a -> AO_stack_bl[i] == first);
                                /* No actual race with the above CAS.   */
#   endif
    /* first is on the auxiliary black list.  It may be removed by      */
    /* another thread before we get to it, but a new insertion of x     */
    /* cannot be started here.  Only we can remove it from the black    */
    /* list.  We need to make sure that first is still the first entry  */
    /* on the list.  Otherwise it is possible that a reinsertion of it  */
    /* was already started before we added the black list entry.        */
    if (AO_EXPECT_FALSE(first != AO_load_acquire(list)))
                        /* Workaround test failure on AIX, at least, by */
                        /* using acquire ordering semantics for this    */
                        /* load.  Probably, it is not the right fix.    */
    {
      AO_store_release(a->AO_stack_bl+i, 0);
      goto retry;
    }
    first_ptr = AO_REAL_NEXT_PTR(first);
    next = AO_load_next(first_ptr);
    if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next)))
    {
      AO_store_release(a->AO_stack_bl+i, 0);
      goto retry;
    }
#   ifndef AO_THREAD_SANITIZER
      assert(*list != first); /* No actual race with the above CAS.     */
#   endif
    /* Since we never insert an entry on the black list, this cannot    */
    /* have succeeded unless first remained on the list while we were   */
    /* running.  Thus, its next link cannot have changed out from under */
    /* us, and we removed exactly one entry and preserved the rest of   */
    /* the list.  Note that it is quite possible that an additional     */
    /* entry was inserted and removed while we were running; this is OK */
    /* since the part of the list following first must have remained    */
    /* unchanged, and first must again have been at the head of the     */
    /* list when the compare_and_swap succeeded.                        */
    AO_store_release(a->AO_stack_bl+i, 0);
    return first_ptr;
  }
245

246
  /* Public push entry point for the almost-lock-free configuration:    */
  /* forwards to the explicit-aux variant, passing the head pointer     */
  /* and the auxiliary black list embedded in the AO_stack_t.           */
  AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *x)
  {
    volatile AO_t *head = &list->AO_pa.AO_ptr;
    AO_stack_aux *aux = &list->AO_pa.AO_aux;

    AO_stack_push_explicit_aux_release(head, x, aux);
  }
251

252
  /* Public pop entry point for the almost-lock-free configuration:     */
  /* forwards to the explicit-aux variant, passing the head pointer     */
  /* and the auxiliary black list embedded in the AO_stack_t.           */
  AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
  {
    volatile AO_t *head = &list->AO_pa.AO_ptr;
    AO_stack_aux *aux = &list->AO_pa.AO_aux;

    return AO_stack_pop_explicit_aux_acquire(head, aux);
  }
257

258
#else /* ! USE_ALMOST_LOCK_FREE */
259

260
  /* The functionality is the same as of AO_load_next but the atomicity */
  /* is not needed.  The usage is similar to that of store_before_cas.  */
# if defined(AO_THREAD_SANITIZER) \
     && (defined(AO_HAVE_compare_and_swap_double) \
         || defined(AO_HAVE_compare_double_and_swap_double))
    /* TODO: If compiled by Clang (as of clang-4.0) with -O3 flag,      */
    /* no_sanitize attribute is ignored unless the argument is volatile.*/
#   if defined(__clang__)
#     define LOAD_BEFORE_CAS_VOLATILE volatile
#   else
#     define LOAD_BEFORE_CAS_VOLATILE /* empty */
#   endif
    /* A real function (rather than a macro) so the benign pre-CAS      */
    /* read race can be suppressed for TSan.                            */
    AO_ATTR_NO_SANITIZE_THREAD
    static AO_t load_before_cas(const LOAD_BEFORE_CAS_VOLATILE AO_t *addr)
    {
      return *addr;
    }
# else
#   define load_before_cas(addr) (*(addr))
# endif /* !AO_THREAD_SANITIZER */
280

281
  /* Better names for fields in AO_stack_t: AO_val1 serves as the       */
  /* version (ABA) counter and AO_val2 as the top-of-stack pointer.     */
  /* Both aliases are #undef'd at the end of this file.                 */
# define version AO_vp.AO_val1
# define ptr AO_vp.AO_val2
284

285
# if defined(AO_HAVE_compare_double_and_swap_double) \
286
     && !(defined(AO_STACK_PREFER_CAS_DOUBLE) \
287
          && defined(AO_HAVE_compare_and_swap_double))
288

289
#   ifdef LINT2
290
      volatile /* non-static */ AO_t AO_noop_sink;
291
#   endif
292

293
    /* Push element onto the stack using a pointer-wide (narrow) CAS,   */
    /* an old optimization suggested by Treiber.  Pop is still safe,    */
    /* since we run into the ABA problem only if there were both        */
    /* intervening pops and pushes; in that case pop still sees a       */
    /* change in the version number.                                    */
    AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *element)
    {
      AO_t old_head;

      for (;;) {
        old_head = AO_load(&list->ptr);
        store_before_cas(element, old_head);
        if (AO_compare_and_swap_release(&list->ptr, old_head,
                                        (AO_t)element))
          break;
      }
#     ifdef LINT2
        /* Instruct static analyzer that element is not lost.   */
        AO_noop_sink = (AO_t)element;
#     endif
    }
311

312
    /* Pop the top element, or return NULL if the stack is empty.       */
    /* The double-width CAS checks both the version counter and the     */
    /* head pointer and bumps the version on success, so an ABA         */
    /* recurrence of the same head pointer is detected via the changed  */
    /* version and forces a retry.                                      */
    AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
    {
#     if defined(__clang__) && !AO_CLANG_PREREQ(3, 5)
        AO_t *volatile cptr;
                        /* Use volatile to workaround a bug in          */
                        /* clang-1.1/x86 causing test_stack failure.    */
#     else
        AO_t *cptr;
#     endif
      AO_t next;
      AO_t cversion;

      do {
        /* Version must be loaded first.    */
        cversion = AO_load_acquire(&list->version);
        cptr = (AO_t *)AO_load(&list->ptr);
        if (NULL == cptr)
          return NULL;
        next = load_before_cas((AO_t *)cptr);
      } while (AO_EXPECT_FALSE(!AO_compare_double_and_swap_double_release(
                                        &list->AO_vp, cversion, (AO_t)cptr,
                                        cversion+1, next)));
      return cptr;
    }
336

337
# elif defined(AO_HAVE_compare_and_swap_double)
338

339
    /* Needed for future IA64 processors.  No current clients?  */
340
    /* TODO: Not tested thoroughly. */
341

342
    /* We have a wide CAS, but only does an AO_t-wide comparison.       */
343
    /* We cannot use the Treiber optimization, since we only check      */
344
    /* for an unchanged version number, not an unchanged pointer.       */
345
    /* Push element onto the stack.  Unlike the Treiber variant above,  */
    /* this wide CAS compares only the version word, so the version     */
    /* must be bumped on every push as well as on every pop.            */
    AO_API void AO_stack_push_release(AO_stack_t *list, AO_t *element)
    {
      AO_t cversion;

      do {
        AO_t next_ptr;

        /* Again version must be loaded first, for different reason.    */
        cversion = AO_load_acquire(&list->version);
        next_ptr = AO_load(&list->ptr);
        store_before_cas(element, next_ptr);
      } while (!AO_compare_and_swap_double_release(&list->AO_vp, cversion,
                                                   cversion+1, (AO_t)element));
    }
359

360
    /* Pop the top element, or return NULL if the stack is empty.       */
    /* The double-wide compare checks both the version and the head     */
    /* pointer, and increments the version on success to guard against  */
    /* ABA recurrences of the same head pointer.                        */
    AO_API AO_t *AO_stack_pop_acquire(AO_stack_t *list)
    {
      AO_t *cptr;
      AO_t next;
      AO_t cversion;

      do {
        /* Version must be loaded before the head pointer (see the      */
        /* analogous comment in the variant above).                     */
        cversion = AO_load_acquire(&list->version);
        cptr = (AO_t *)AO_load(&list->ptr);
        if (NULL == cptr)
          return NULL;
        next = load_before_cas(cptr);
      } while (!AO_compare_double_and_swap_double_release(&list->AO_vp,
                                cversion, (AO_t)cptr, cversion+1, next));
      return cptr;
    }
376
# endif /* AO_HAVE_compare_and_swap_double */
377

378
# undef ptr
379
# undef version
380
#endif /* ! USE_ALMOST_LOCK_FREE */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc