• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ivmai / libatomic_ops / 596

23 May 2023 05:55AM UTC coverage: 96.426% (-0.1%) from 96.557%
596

push

travis-ci-com

ivmai
DRAFT 204 - test all on dist jammy

2725 of 2826 relevant lines covered (96.43%)

986507.48 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

86.59
/src/atomic_ops_malloc.c
1
/*
2
 * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
3
 *
4
 * This program is free software; you can redistribute it and/or modify
5
 * it under the terms of the GNU General Public License as published by
6
 * the Free Software Foundation; either version 2 of the License, or
7
 * (at your option) any later version.
8
 *
9
 * This program is distributed in the hope that it will be useful,
10
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
 * GNU General Public License for more details.
13
 *
14
 * You should have received a copy of the GNU General Public License along
15
 * with this program; if not, write to the Free Software Foundation, Inc.,
16
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17
 */
18

19
#if defined(HAVE_CONFIG_H)
20
# include "config.h"
21
#endif
22

23
#ifdef DONT_USE_MMAP /* for testing */
24
# undef HAVE_MMAP
25
#endif
26

27
#ifndef AO_BUILD
28
# define AO_BUILD
29
#endif
30

31
#define AO_REQUIRE_CAS
32
#include "atomic_ops_malloc.h"
33

34
#include <string.h>     /* for ffs, which is assumed reentrant. */
35
#include <stdlib.h>
36
#include <assert.h>
37

38
#ifdef AO_TRACE_MALLOC
39
# include <stdio.h>
40
# include <pthread.h>
41
#endif
42

43
#if defined(AO_ADDRESS_SANITIZER) && !defined(AO_NO_MALLOC_POISON)
44
  /* #include "sanitizer/asan_interface.h" */
45
  void __asan_poison_memory_region(void *, size_t);
46
  void __asan_unpoison_memory_region(void *, size_t);
47
# define ASAN_POISON_MEMORY_REGION(addr, size) \
48
                __asan_poison_memory_region(addr, size)
49
# define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
50
                __asan_unpoison_memory_region(addr, size)
51
#else
52
# define ASAN_POISON_MEMORY_REGION(addr, size) (void)0
53
# define ASAN_UNPOISON_MEMORY_REGION(addr, size) (void)0
54
#endif /* !AO_ADDRESS_SANITIZER */
55

56
#if (defined(_WIN32_WCE) || defined(__MINGW32CE__)) && !defined(AO_HAVE_abort)
57
# define abort() _exit(-1) /* there is no abort() in WinCE */
58
#endif
59

60
/*
61
 * We round up each allocation request to the next power of two
62
 * minus one word.
63
 * We keep one stack of free objects for each size.  Each object
64
 * has an initial word (offset -sizeof(AO_t) from the visible pointer)
65
 * which contains either
66
 *      The binary log of the object size in bytes (small objects)
67
 *      The object size (a multiple of CHUNK_SIZE) for large objects.
68
 * The second case only arises if mmap-based allocation is supported.
69
 * We align the user-visible part of each object on a GRANULARITY
70
 * byte boundary.  That means that the actual (hidden) start of
71
 * the object starts a word before this boundary.
72
 */
73

74
#ifndef LOG_MAX_SIZE
75
# define LOG_MAX_SIZE 16
76
        /* We assume that 2**LOG_MAX_SIZE is a multiple of page size. */
77
#endif
78

79
#ifndef ALIGNMENT
80
# define ALIGNMENT 16
81
        /* Assumed to be at least sizeof(AO_t).         */
82
#endif
83

84
#define CHUNK_SIZE (1 << LOG_MAX_SIZE)
85

86
#ifndef AO_INITIAL_HEAP_SIZE
87
#  define AO_INITIAL_HEAP_SIZE (2*(LOG_MAX_SIZE+1)*CHUNK_SIZE)
88
#endif
89

90
/* Statically allocated bootstrap heap, carved into chunks before       */
/* (or instead of) mmap-based allocation.                               */
char AO_initial_heap[AO_INITIAL_HEAP_SIZE];

/* Next unallocated address within AO_initial_heap; advanced with       */
/* atomic compare-and-swap by get_chunk().                              */
static volatile AO_t initial_heap_ptr = (AO_t)AO_initial_heap;
93

94
#if defined(HAVE_MMAP)
95

96
#include <sys/types.h>
97
#include <sys/stat.h>
98
#include <fcntl.h>
99
#include <sys/mman.h>
100

101
#if defined(MAP_ANONYMOUS) || defined(MAP_ANON)
102
# define USE_MMAP_ANON
103
#endif
104

105
#ifdef USE_MMAP_FIXED
106
# define GC_MMAP_FLAGS (MAP_FIXED | MAP_PRIVATE)
107
        /* Seems to yield better performance on Solaris 2, but can      */
108
        /* be unreliable if something is already mapped at the address. */
109
#else
110
# define GC_MMAP_FLAGS MAP_PRIVATE
111
#endif
112

113
#ifdef USE_MMAP_ANON
114
# if defined(CPPCHECK)
115
#   define OPT_MAP_ANON 0x20 /* taken from linux */
116
# elif defined(MAP_ANONYMOUS)
117
#   define OPT_MAP_ANON MAP_ANONYMOUS
118
# else
119
#   define OPT_MAP_ANON MAP_ANON
120
# endif
121
#else
122
# include <unistd.h> /* for close() */
123
# define OPT_MAP_ANON 0
124
#endif
125

126
static volatile AO_t mmap_enabled = 0;
127

128
/* Permit get_mmaped() to call mmap() (disabled by default).  The flag  */
/* is written with an atomic store so concurrently allocating threads   */
/* observe a consistent value.                                          */
AO_API void
AO_malloc_enable_mmap(void)
{
# if defined(__sun)
    AO_store_release(&mmap_enabled, 1);
            /* Workaround for Sun CC */
# else
    AO_store(&mmap_enabled, 1);
# endif
}
138

139
/* Obtain sz bytes of zero-filled memory from the OS via mmap, or 0 if  */
/* mmap is disabled (see AO_malloc_enable_mmap) or fails.  sz must be   */
/* a multiple of CHUNK_SIZE.                                            */
static char *get_mmaped(size_t sz)
{
  char * result;
# ifdef USE_MMAP_ANON
#   define zero_fd -1
# else
    int zero_fd;
# endif

  assert(!(sz & (CHUNK_SIZE - 1)));
  if (!mmap_enabled)
    return 0;

# ifndef USE_MMAP_ANON
    /* No anonymous-mapping support: map /dev/zero instead.             */
    zero_fd = open("/dev/zero", O_RDONLY);
    if (zero_fd == -1)
      return 0;
# endif
  result = (char *)mmap(0, sz, PROT_READ | PROT_WRITE,
                        GC_MMAP_FLAGS | OPT_MAP_ANON,
                        zero_fd, 0 /* offset */);
# ifndef USE_MMAP_ANON
    /* The mapping remains valid after the descriptor is closed.        */
    close(zero_fd);
# endif
  if (AO_EXPECT_FALSE(result == MAP_FAILED))
    result = NULL;
  return result;
}
167

168
#ifndef SIZE_MAX
169
# include <limits.h>
170
#endif
171
#if defined(SIZE_MAX) && !defined(CPPCHECK)
172
# define AO_SIZE_MAX ((size_t)SIZE_MAX)
173
            /* Extra cast to workaround some buggy SIZE_MAX definitions. */
174
#else
175
# define AO_SIZE_MAX (~(size_t)0)
176
#endif
177

178
/* Saturated addition of size_t values.  Used to avoid value wrap       */
179
/* around on overflow.  The arguments should have no side effects.      */
180
#define SIZET_SAT_ADD(a, b) \
181
    (AO_EXPECT_FALSE((a) >= AO_SIZE_MAX - (b)) ? AO_SIZE_MAX : (a) + (b))
182

183
/* Allocate an object of size (incl. header) of size > CHUNK_SIZE.      */
/* sz includes space for an AO_t-sized header.                          */
static char *
AO_malloc_large(size_t sz)
{
  void *result;

  /* The header will force us to waste ALIGNMENT bytes, incl. header.   */
  /* Round to multiple of CHUNK_SIZE.                                   */
  sz = SIZET_SAT_ADD(sz, ALIGNMENT + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1);
  /* The stored size must exceed LOG_MAX_SIZE: that is how AO_free      */
  /* tells large objects apart from small ones (whose header holds a    */
  /* small binary log).                                                 */
  assert(sz > LOG_MAX_SIZE);
  result = get_mmaped(sz);
  if (AO_EXPECT_FALSE(NULL == result))
    return NULL;

  /* Skip one whole ALIGNMENT unit so that the user-visible pointer is  */
  /* aligned; the size header occupies the word just before it.         */
  result = (AO_t *)result + ALIGNMENT / sizeof(AO_t);
  ((AO_t *)result)[-1] = (AO_t)sz;
  return (char *)result;
}
202

203
/* Deallocate an object obtained through AO_malloc_large.  The hidden   */
/* header word holds the exact size that was passed to mmap.            */
static void
AO_free_large(void *p)
{
  AO_t sz = ((AO_t *)p)[-1];
  /* Undo the ALIGNMENT offset applied by AO_malloc_large to recover    */
  /* the true start of the mapping.                                     */
  if (munmap((AO_t *)p - ALIGNMENT / sizeof(AO_t), (size_t)sz) != 0)
    abort();  /* Programmer error.  Not really async-signal-safe, but ... */
}
210

211

212
#else /*  No MMAP */
213

214
/* Without mmap support, enabling mmap is a no-op; allocation is        */
/* limited to the static initial heap.                                  */
AO_API void
AO_malloc_enable_mmap(void)
{
}
218

219
#define get_mmaped(sz) ((char*)0)
220
#define AO_malloc_large(sz) ((char*)0)
221
#define AO_free_large(p) abort()
222
                /* Programmer error.  Not really async-signal-safe, but ... */
223

224
#endif /* No MMAP */
225

226
/* Obtain one CHUNK_SIZE-d, ALIGNMENT-aligned chunk of memory: first    */
/* by atomically bumping a pointer through the static initial heap,     */
/* then, once that is exhausted, via mmap.  Lock-free; returns 0 on     */
/* failure (e.g. mmap disabled or out of memory).                       */
static char *
get_chunk(void)
{
  char *my_chunk_ptr;

  for (;;) {
    char *initial_ptr = (char *)AO_load(&initial_heap_ptr);

    /* Round the shared pointer up to the next ALIGNMENT boundary.      */
    my_chunk_ptr = (char *)(((AO_t)initial_ptr + (ALIGNMENT - 1))
                            & ~(ALIGNMENT - 1));
    if (initial_ptr != my_chunk_ptr)
      {
        /* Align correctly.  If this fails, someone else did it for us. */
        (void)AO_compare_and_swap_acquire(&initial_heap_ptr,
                                    (AO_t)initial_ptr, (AO_t)my_chunk_ptr);
      }

    if (AO_EXPECT_FALSE((AO_t)my_chunk_ptr
            > (AO_t)(AO_initial_heap + AO_INITIAL_HEAP_SIZE - CHUNK_SIZE))) {
      /* We failed.  The initial heap is used up.       */
      my_chunk_ptr = get_mmaped(CHUNK_SIZE);
#     if !defined(CPPCHECK)
        assert(((AO_t)my_chunk_ptr & (ALIGNMENT-1)) == 0);
#     endif
      break;
    }
    /* Try to claim [my_chunk_ptr, my_chunk_ptr + CHUNK_SIZE); on a     */
    /* concurrent update of initial_heap_ptr, loop and retry.           */
    if (AO_compare_and_swap(&initial_heap_ptr, (AO_t)my_chunk_ptr,
                            (AO_t)(my_chunk_ptr + CHUNK_SIZE))) {
      break;
    }
  }
  return my_chunk_ptr;
}
259

260
/* Object free lists.  Ith entry corresponds to objects         */
/* of total size 2**i bytes (including the hidden header).      */
/* Each list is a lock-free AO_stack_t.                         */
static AO_stack_t AO_free_list[LOG_MAX_SIZE+1];
263

264
/* Break up the chunk, and add it to the object free list for   */
265
/* the given size.  We have exclusive access to chunk.          */
266
static void add_chunk_as(void * chunk, unsigned log_sz)
157✔
267
{
268
  size_t ofs, limit;
269
  size_t sz = (size_t)1 << log_sz;
157✔
270

271
  assert(CHUNK_SIZE >= sz);
272
  assert(sz % sizeof(AO_t) == 0);
273
  limit = (size_t)CHUNK_SIZE - sz;
157✔
274
  for (ofs = ALIGNMENT - sizeof(AO_t); ofs <= limit; ofs += sz) {
46,413✔
275
    ASAN_POISON_MEMORY_REGION((char *)chunk + ofs + sizeof(AO_t),
276
                              sz - sizeof(AO_t));
277
    AO_stack_push(&AO_free_list[log_sz], (AO_t *)chunk + ofs / sizeof(AO_t));
46,282✔
278
  }
279
}
131✔
280

281
/* Lookup table: msbs[i] is the 1-based position of the highest set     */
/* bit of i, for 0 <= i <= 15 (0 when i == 0).                          */
static const unsigned char msbs[16] = {
  0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4
};

/* Return the position of the most significant set bit in the           */
/* argument, following the ffs() convention: the least significant      */
/* bit is number one, and msb(0) == 0.                                  */
/* Works by repeatedly halving the candidate bit range, finishing       */
/* with a 4-bit table lookup.                                           */
static unsigned msb(size_t s)
{
  unsigned result = 0;

  if ((s & 0xff) != s) {
    /* Some bit above the low byte is set: shift the relevant upper     */
    /* half down, accumulating the discarded bit count in result.       */
#   if (__SIZEOF_SIZE_T__ == 8) && !defined(CPPCHECK)
      unsigned hi = (unsigned)(s >> 32);

      if (hi != 0) {
        s = hi;
        result += 32;
      }
#   elif __SIZEOF_SIZE_T__ == 4
      /* No op: size_t has no bits above position 31. */
#   else
      unsigned hi;
      /* Equivalent to "(hi = s >> 32) != 0" but suppresses             */
      /* shift-count warnings when size_t is only 32 bits wide.         */
#     define SIZEOF_SIZE_T_GT_4 (sizeof(size_t) > 4)
      if (SIZEOF_SIZE_T_GT_4
          && (hi = (unsigned)(s >> (SIZEOF_SIZE_T_GT_4 ? 32 : 0))) != 0) {
        s = hi;
        result += 32;
      }
#   endif /* !defined(__SIZEOF_SIZE_T__) */
    if ((s >> 16) != 0) {
      s >>= 16;
      result += 16;
    }
    if ((s >> 8) != 0) {
      s >>= 8;
      result += 8;
    }
  }
  if (s > 15) {
    s >>= 4;
    result += 4;
  }
  return result + msbs[s];
}
333

334
/* Allocate sz bytes.  Small requests are rounded up to a power of      */
/* two (incl. an AO_t-sized size-class header) and served from the      */
/* per-size lock-free free lists; oversized requests go to mmap.        */
/* Returns NULL on failure.                                             */
AO_API AO_ATTR_MALLOC AO_ATTR_ALLOC_SIZE(1)
void *
AO_malloc(size_t sz)
{
  AO_t *result;
  unsigned log_sz;

  /* Requests that cannot fit in one chunk (together with the header)   */
  /* are served directly by mmap.                                       */
  if (AO_EXPECT_FALSE(sz > CHUNK_SIZE - sizeof(AO_t)))
    return AO_malloc_large(sz);
  /* Smallest power of two >= sz + sizeof(AO_t); msb() is 1-based.      */
  log_sz = msb(sz + (sizeof(AO_t) - 1));
  assert(log_sz <= LOG_MAX_SIZE);
  assert(((size_t)1 << log_sz) >= sz + sizeof(AO_t));
  result = AO_stack_pop(AO_free_list+log_sz);
  while (AO_EXPECT_FALSE(NULL == result)) {
    /* Free list is empty: carve a fresh chunk into objects of this     */
    /* size class and retry.  Other threads may pop the new objects     */
    /* first, hence the loop rather than a single retry.                */
    void * chunk = get_chunk();

    if (AO_EXPECT_FALSE(NULL == chunk))
      return NULL;
    add_chunk_as(chunk, log_sz);
    result = AO_stack_pop(AO_free_list+log_sz);
  }
  /* Record the size class in the hidden header word; AO_free reads it. */
  *result = log_sz;
# ifdef AO_TRACE_MALLOC
    fprintf(stderr, "%p: AO_malloc(%lu) = %p\n",
            (void *)pthread_self(), (unsigned long)sz, (void *)(result + 1));
# endif
  ASAN_UNPOISON_MEMORY_REGION(result + 1, sz);
  return result + 1;
}
363

364
AO_API void
365
AO_free(void *p)
15,796,404✔
366
{
367
  AO_t *base;
368
  int log_sz;
369

370
  if (AO_EXPECT_FALSE(NULL == p))
15,796,404✔
371
    return;
1✔
372

373
  base = (AO_t *)p - 1;
15,796,403✔
374
  log_sz = (int)(*base);
15,796,403✔
375
# ifdef AO_TRACE_MALLOC
376
    fprintf(stderr, "%p: AO_free(%p sz:%lu)\n", (void *)pthread_self(), p,
377
            log_sz > LOG_MAX_SIZE ? (unsigned)log_sz : 1UL << log_sz);
378
# endif
379
  if (AO_EXPECT_FALSE(log_sz > LOG_MAX_SIZE)) {
15,796,403✔
380
    AO_free_large(p);
33✔
381
  } else {
382
    ASAN_POISON_MEMORY_REGION(base + 1, ((size_t)1 << log_sz) - sizeof(AO_t));
383
    AO_stack_push(AO_free_list + log_sz, base);
15,796,370✔
384
  }
385
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc