
tarantool / luajit / 10940225480

19 Sep 2024 11:22AM UTC coverage: 92.845% (-0.06%) from 92.909%

push · github · Buristan

FFI: Workaround for platform dlerror() returning NULL.

Contributed by mcclure.

(cherry picked from commit 478bcfe52)

The `ffi.load()` implementation assumes the string returned from
`dlerror()` is non-NULL and immediately dereferences it. This may lead
to a crash on POSIX platforms, where `dlerror()` is permitted to return
NULL [1].

This patch adds the corresponding NULL check and falls back to the
default "dlopen failed" error message.

Sergey Kaplun:
* added the description and the test for the problem

[1]: https://pubs.opengroup.org/onlinepubs/009695399/functions/dlerror.html

Part of tarantool/tarantool#10199

Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Reviewed-by: Maxim Kokryashkin <m.kokryashkin@tarantool.org>
Signed-off-by: Sergey Kaplun <skaplun@tarantool.org>
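
For illustration only, a minimal sketch of the workaround described above, assuming the error string is fetched via dlerror() right after a failed dlopen(); this is not the verbatim patch, and the variable name err is hypothetical:

  const char *err = dlerror();   /* POSIX allows this to return NULL. */
  if (err == NULL)
    err = "dlopen failed";       /* Substitute the default message. */
  /* ... raise the ffi.load() error using err ... */

This mirrors the description above: the message is only used after the NULL check, so platforms whose dlerror() yields NULL report "dlopen failed" instead of crashing.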

5684 of 6027 branches covered (94.31%)

Branch coverage included in aggregate %.

2 of 2 new or added lines in 1 file covered (100.0%).

26 existing lines in 5 files now uncovered.

21670 of 23435 relevant lines covered (92.47%)

2912061.21 hits per line

Source File

/src/lj_alloc.c (91.11% covered)
1
/*
2
** Bundled memory allocator.
3
**
4
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
** The original bears the following remark:
6
**
7
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
8
**   Doug Lea and released to the public domain, as explained at
9
**   http://creativecommons.org/licenses/publicdomain.
10
**
11
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
12
**
13
** No additional copyright is claimed over the customizations.
14
** Please do NOT bother the original author about this version here!
15
**
16
** If you want to use dlmalloc in another project, you should get
17
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
** For thread-safe derivatives, take a look at:
19
** - ptmalloc: http://www.malloc.de/
20
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
*/
22

23
#define lj_alloc_c
24
#define LUA_CORE
25

26
/* To get the mremap prototype. Must be defined before any system includes. */
27
#if defined(__linux__) && !defined(_GNU_SOURCE)
28
#define _GNU_SOURCE
29
#endif
30

31
#include "lj_def.h"
32
#include "lj_arch.h"
33
#include "lj_alloc.h"
34

35
#ifndef LUAJIT_USE_SYSMALLOC
36

37
#define MAX_SIZE_T                (~(size_t)0)
38
#define MALLOC_ALIGNMENT        ((size_t)8U)
39

40
#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
41
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
43
#define MAX_RELEASE_CHECK_RATE        255
44

45
/* ------------------- size_t and alignment properties -------------------- */
46

47
/* The byte and bit size of a size_t */
48
#define SIZE_T_SIZE                (sizeof(size_t))
49
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)
50

51
/* Some constants coerced to size_t */
52
/* Annoying but necessary to avoid errors on some platforms */
53
#define SIZE_T_ZERO                ((size_t)0)
54
#define SIZE_T_ONE                ((size_t)1)
55
#define SIZE_T_TWO                ((size_t)2)
56
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
57
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
58
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59

60
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)
62

63
/* the number of bytes to offset an address to align it */
64
#define align_offset(A)\
65
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
67

68
/* -------------------------- MMAP support ------------------------------- */
69

70
#define MFAIL                        ((void *)(MAX_SIZE_T))
71
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */
72

73
#define IS_DIRECT_BIT                (SIZE_T_ONE)
74

75

76
/* Determine system-specific block allocation method. */
77
#if LJ_TARGET_WINDOWS
78

79
#define WIN32_LEAN_AND_MEAN
80
#include <windows.h>
81

82
#define LJ_ALLOC_VIRTUALALLOC        1
83

84
#if LJ_64 && !LJ_GC64
85
#define LJ_ALLOC_NTAVM                1
86
#endif
87

88
#else
89

90
#include <errno.h>
91
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92
#include <sys/mman.h>
93

94
#define LJ_ALLOC_MMAP                1
95

96
#if LJ_64
97

98
#define LJ_ALLOC_MMAP_PROBE        1
99

100
#if LJ_GC64
101
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
102
#elif LJ_TARGET_X64 && LJ_HASJIT
103
/* Due to limitations in the x64 compiler backend. */
104
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
105
#else
106
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
107
#endif
108

109
#endif
110

111
#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112
#define LJ_ALLOC_MMAP32                1
113
#endif
114

115
#if LJ_TARGET_LINUX
116
#define LJ_ALLOC_MREMAP                1
117
#endif
118

119
#endif
120

121

122
#if LJ_ALLOC_VIRTUALALLOC
123

124
#if LJ_ALLOC_NTAVM
125
/* Undocumented, but hey, that's what we all love so much about Windows. */
126
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127
                       size_t *size, ULONG alloctype, ULONG prot);
128
static PNTAVM ntavm;
129

130
/* Number of top bits of the lower 32 bits of an address that must be zero.
131
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132
*/
133
#define NTAVM_ZEROBITS                1
134

135
static void init_mmap(void)
136
{
137
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138
                                 "NtAllocateVirtualMemory");
139
}
140
#define INIT_MMAP()        init_mmap()
141

142
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143
static void *CALL_MMAP(size_t size)
144
{
145
  DWORD olderr = GetLastError();
146
  void *ptr = NULL;
147
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149
  SetLastError(olderr);
150
  return st == 0 ? ptr : MFAIL;
151
}
152

153
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154
static void *DIRECT_MMAP(size_t size)
155
{
156
  DWORD olderr = GetLastError();
157
  void *ptr = NULL;
158
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160
  SetLastError(olderr);
161
  return st == 0 ? ptr : MFAIL;
162
}
163

164
#else
165

166
/* Win32 MMAP via VirtualAlloc */
167
static void *CALL_MMAP(size_t size)
168
{
169
  DWORD olderr = GetLastError();
170
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171
  SetLastError(olderr);
172
  return ptr ? ptr : MFAIL;
173
}
174

175
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176
static void *DIRECT_MMAP(size_t size)
177
{
178
  DWORD olderr = GetLastError();
179
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180
                            PAGE_READWRITE);
181
  SetLastError(olderr);
182
  return ptr ? ptr : MFAIL;
183
}
184

185
#endif
186

187
/* This function supports releasing coalesced segments */
188
static int CALL_MUNMAP(void *ptr, size_t size)
189
{
190
  DWORD olderr = GetLastError();
191
  MEMORY_BASIC_INFORMATION minfo;
192
  char *cptr = (char *)ptr;
193
  while (size) {
194
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195
      return -1;
196
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198
      return -1;
199
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200
      return -1;
201
    cptr += minfo.RegionSize;
202
    size -= minfo.RegionSize;
203
  }
204
  SetLastError(olderr);
205
  return 0;
206
}
207

208
#elif LJ_ALLOC_MMAP
209

210
#define MMAP_PROT                (PROT_READ|PROT_WRITE)
211
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212
#define MAP_ANONYMOUS                MAP_ANON
213
#endif
214
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)
215

216
#if LJ_ALLOC_MMAP_PROBE
217

218
#ifdef MAP_TRYFIXED
219
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
220
#else
221
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
222
#endif
223

224
#define LJ_ALLOC_MMAP_PROBE_MAX                30
225
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5
226

227
#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)
228

229
/* No point in a giant ifdef mess. Just try to open /dev/urandom.
230
** It doesn't really matter if this fails, since we get some ASLR bits from
231
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
232
*/
233
#include <fcntl.h>
234
#include <unistd.h>
235

236
static uintptr_t mmap_probe_seed(void)
×
237
{
238
  uintptr_t val;
×
239
  int fd = open("/dev/urandom", O_RDONLY);
×
240
  if (fd != -1) {
×
241
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
×
242
    (void)close(fd);
×
243
    if (ok) return val;
×
244
  }
245
  return 1;  /* Punt. */
246
}
247

248
static void *mmap_probe(size_t size)
37,553✔
249
{
250
  /* Hint for next allocation. Doesn't need to be thread-safe. */
251
  static uintptr_t hint_addr = 0;
37,553✔
252
  static uintptr_t hint_prng = 0;
37,553✔
253
  int olderr = errno;
37,553✔
254
  int retry;
37,553✔
255
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
37,553✔
256
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
37,553✔
257
    uintptr_t addr = (uintptr_t)p;
37,553✔
258
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
37,553✔
259
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
37,553✔
260
      /* We got a suitable address. Bump the hint address. */
261
      hint_addr = addr + size;
37,553✔
262
      errno = olderr;
37,553✔
263
      return p;
37,553✔
264
    }
265
    if (p != MFAIL) {
×
266
      munmap(p, size);
×
267
    } else if (errno == ENOMEM) {
×
268
      return MFAIL;
269
    }
270
    if (hint_addr) {
×
271
      /* First, try linear probing. */
272
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
273
        hint_addr += 0x1000000;
×
274
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
×
275
          hint_addr = 0;
×
276
        continue;
×
277
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
278
        /* Next, try a no-hint probe to get back an ASLR address. */
279
        hint_addr = 0;
×
280
        continue;
×
281
      }
282
    }
283
    /* Finally, try pseudo-random probing. */
284
    if (LJ_UNLIKELY(hint_prng == 0)) {
×
285
      hint_prng = mmap_probe_seed();
×
286
    }
287
    /* The unsuitable address we got has some ASLR PRNG bits. */
288
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
×
289
    do {  /* The PRNG itself is very weak, but see above. */
×
290
      hint_prng = hint_prng * 1103515245 + 12345;
×
291
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
×
292
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
×
293
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
×
294
  }
295
  errno = olderr;
×
296
  return MFAIL;
×
297
}
298

299
#endif
300

301
#if LJ_ALLOC_MMAP32
302

303
#if defined(__sun__)
304
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
305
#else
306
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
307
#endif
308

309
static void *mmap_map32(size_t size)
310
{
311
#if LJ_ALLOC_MMAP_PROBE
312
  static int fallback = 0;
313
  if (fallback)
314
    return mmap_probe(size);
315
#endif
316
  {
317
    int olderr = errno;
318
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
319
    errno = olderr;
320
    /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
321
#if LJ_ALLOC_MMAP_PROBE
322
    if (ptr == MFAIL) {
323
      fallback = 1;
324
      return mmap_probe(size);
325
    }
326
#endif
327
    return ptr;
328
  }
329
}
330

331
#endif
332

333
#if LJ_ALLOC_MMAP32
334
#define CALL_MMAP(size)                mmap_map32(size)
335
#elif LJ_ALLOC_MMAP_PROBE
336
#define CALL_MMAP(size)                mmap_probe(size)
337
#else
338
static void *CALL_MMAP(size_t size)
339
{
340
  int olderr = errno;
341
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
342
  errno = olderr;
343
  return ptr;
344
}
345
#endif
346

347
#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
348

349
#include <sys/resource.h>
350

351
static void init_mmap(void)
352
{
353
  struct rlimit rlim;
354
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
355
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
356
}
357
#define INIT_MMAP()        init_mmap()
358

359
#endif
360

361
static int CALL_MUNMAP(void *ptr, size_t size)
3,381✔
362
{
363
  int olderr = errno;
3,381✔
364
  int ret = munmap(ptr, size);
3,362✔
365
  errno = olderr;
3,381✔
366
  return ret;
3,381✔
367
}
368

369
#if LJ_ALLOC_MREMAP
370
/* Need to define _GNU_SOURCE to get the mremap prototype. */
371
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
114✔
372
{
373
  int olderr = errno;
114✔
374
  ptr = mremap(ptr, osz, nsz, flags);
228✔
375
  errno = olderr;
114✔
376
  return ptr;
114✔
377
}
378

379
#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
380
#define CALL_MREMAP_NOMOVE        0
381
#define CALL_MREMAP_MAYMOVE        1
382
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
383
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
384
#else
385
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
386
#endif
387
#endif
388

389
#endif
390

391

392
#ifndef INIT_MMAP
393
#define INIT_MMAP()                ((void)0)
394
#endif
395

396
#ifndef DIRECT_MMAP
397
#define DIRECT_MMAP(s)                CALL_MMAP(s)
398
#endif
399

400
#ifndef CALL_MREMAP
401
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
402
#endif
403

404
/* -----------------------  Chunk representations ------------------------ */
405

406
struct malloc_chunk {
407
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
408
  size_t               head;       /* Size and inuse bits. */
409
  struct malloc_chunk *fd;         /* double links -- used only if free. */
410
  struct malloc_chunk *bk;
411
};
412

413
typedef struct malloc_chunk  mchunk;
414
typedef struct malloc_chunk *mchunkptr;
415
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
416
typedef size_t bindex_t;               /* Described below */
417
typedef unsigned int binmap_t;         /* Described below */
418
typedef unsigned int flag_t;           /* The type of various bit flag sets */
419

420
/* ------------------- Chunks sizes and alignments ----------------------- */
421

422
#define MCHUNK_SIZE                (sizeof(mchunk))
423

424
#define CHUNK_OVERHEAD                (SIZE_T_SIZE)
425

426
/* Direct chunks need a second word of overhead ... */
427
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
428
/* ... and additional padding for fake next-chunk at foot */
429
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)
430

431
/* The smallest size we can malloc is an aligned minimal chunk */
432
#define MIN_CHUNK_SIZE\
433
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
434

435
/* conversion from malloc headers to user pointers, and back */
436
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
437
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
438
/* chunk associated with aligned address A */
439
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))
440

441
/* Bounds on request (not chunk) sizes. */
442
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
443
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
444

445
/* pad request bytes into a usable size */
446
#define pad_request(req) \
447
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
448

449
/* pad request, checking for minimum (but not maximum) */
450
#define request2size(req) \
451
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
452

453
/* ------------------ Operations on head and foot fields ----------------- */
454

455
#define PINUSE_BIT                (SIZE_T_ONE)
456
#define CINUSE_BIT                (SIZE_T_TWO)
457
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)
458

459
/* Head value for fenceposts */
460
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)
461

462
/* extraction of fields from head words */
463
#define cinuse(p)                ((p)->head & CINUSE_BIT)
464
#define pinuse(p)                ((p)->head & PINUSE_BIT)
465
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))
466

467
#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
468
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)
469

470
/* Treat space at ptr +/- offset as a chunk */
471
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
472
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))
473

474
/* Ptr to next or previous physical malloc_chunk. */
475
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
476
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
477

478
/* extract next chunk's pinuse bit */
479
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)
480

481
/* Get/set size at footer */
482
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
483
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
484

485
/* Set size, pinuse bit, and foot */
486
#define set_size_and_pinuse_of_free_chunk(p, s)\
487
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
488

489
/* Set size, pinuse bit, foot, and clear next pinuse */
490
#define set_free_with_pinuse(p, s, n)\
491
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
492

493
#define is_direct(p)\
494
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
495

496
/* Get the internal overhead associated with chunk p */
497
#define overhead_for(p)\
498
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
499

500
/* ---------------------- Overlaid data structures ----------------------- */
501

502
struct malloc_tree_chunk {
503
  /* The first four fields must be compatible with malloc_chunk */
504
  size_t                    prev_foot;
505
  size_t                    head;
506
  struct malloc_tree_chunk *fd;
507
  struct malloc_tree_chunk *bk;
508

509
  struct malloc_tree_chunk *child[2];
510
  struct malloc_tree_chunk *parent;
511
  bindex_t                  index;
512
};
513

514
typedef struct malloc_tree_chunk  tchunk;
515
typedef struct malloc_tree_chunk *tchunkptr;
516
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
517

518
/* A little helper macro for trees */
519
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
520

521
/* ----------------------------- Segments -------------------------------- */
522

523
struct malloc_segment {
524
  char        *base;             /* base address */
525
  size_t       size;             /* allocated size */
526
  struct malloc_segment *next;   /* ptr to next segment */
527
};
528

529
typedef struct malloc_segment  msegment;
530
typedef struct malloc_segment *msegmentptr;
531

532
/* ---------------------------- malloc_state ----------------------------- */
533

534
/* Bin types, widths and sizes */
535
#define NSMALLBINS                (32U)
536
#define NTREEBINS                (32U)
537
#define SMALLBIN_SHIFT                (3U)
538
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
539
#define TREEBIN_SHIFT                (8U)
540
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
541
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
542
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
543

544
struct malloc_state {
545
  binmap_t   smallmap;
546
  binmap_t   treemap;
547
  size_t     dvsize;
548
  size_t     topsize;
549
  mchunkptr  dv;
550
  mchunkptr  top;
551
  size_t     trim_check;
552
  size_t     release_checks;
553
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
554
  tbinptr    treebins[NTREEBINS];
555
  msegment   seg;
556
};
557

558
typedef struct malloc_state *mstate;
559

560
#define is_initialized(M)        ((M)->top != 0)
561

562
/* -------------------------- system alloc setup ------------------------- */
563

564
/* page-align a size */
565
#define page_align(S)\
566
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
567

568
/* granularity-align a size */
569
#define granularity_align(S)\
570
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
571
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
572

573
#if LJ_TARGET_WINDOWS
574
#define mmap_align(S)        granularity_align(S)
575
#else
576
#define mmap_align(S)        page_align(S)
577
#endif
578

579
/*  True if segment S holds address A */
580
#define segment_holds(S, A)\
581
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
582

583
/* Return segment holding given address */
584
static msegmentptr segment_holding(mstate m, char *addr)
71✔
585
{
586
  msegmentptr sp = &m->seg;
71✔
587
  for (;;) {
71✔
588
    if (addr >= sp->base && addr < sp->base + sp->size)
71✔
589
      return sp;
590
    if ((sp = sp->next) == 0)
×
591
      return 0;
592
  }
593
}
594

595
/* Return true if segment contains a segment link */
596
static int has_segment_link(mstate m, msegmentptr ss)
19✔
597
{
598
  msegmentptr sp = &m->seg;
19✔
599
  for (;;) {
79✔
600
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
79✔
601
      return 1;
602
    if ((sp = sp->next) == 0)
79✔
603
      return 0;
604
  }
605
}
606

607
/*
608
  TOP_FOOT_SIZE is padding at the end of a segment, including space
609
  that may be needed to place segment records and fenceposts when new
610
  noncontiguous segments are added.
611
*/
612
#define TOP_FOOT_SIZE\
613
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
614

615
/* ---------------------------- Indexing Bins ---------------------------- */
616

617
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
618
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
619
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
620
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
621

622
/* addressing by index. See above about smallbin repositioning */
623
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
624
#define treebin_at(M,i)                (&((M)->treebins[i]))
625

626
/* assign tree index for size S to variable I */
627
#define compute_tree_index(S, I)\
628
{\
629
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
630
  if (X == 0) {\
631
    I = 0;\
632
  } else if (X > 0xFFFF) {\
633
    I = NTREEBINS-1;\
634
  } else {\
635
    unsigned int K = lj_fls(X);\
636
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
637
  }\
638
}
639

640
/* Bit representing maximum resolved size in a treebin at i */
641
#define bit_for_tree_index(i) \
642
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
643

644
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
645
#define leftshift_for_tree_index(i) \
646
   ((i == NTREEBINS-1)? 0 : \
647
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
648

649
/* The size of the smallest chunk held in bin with index i */
650
#define minsize_for_tree_index(i) \
651
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
652
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
653

654
/* ------------------------ Operations on bin maps ----------------------- */
655

656
/* bit corresponding to given index */
657
#define idx2bit(i)                ((binmap_t)(1) << (i))
658

659
/* Mark/Clear bits with given index */
660
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
661
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
662
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
663

664
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
665
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
666
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
667

668
/* mask with all bits to left of least bit of x on */
669
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
670

671
/* Set cinuse bit and pinuse bit of next chunk */
672
#define set_inuse(M,p,s)\
673
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
674
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
675

676
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
677
#define set_inuse_and_pinuse(M,p,s)\
678
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
679
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
680

681
/* Set size, cinuse and pinuse bit of this chunk */
682
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
683
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
684

685
/* ----------------------- Operations on smallbins ----------------------- */
686

687
/* Link a free chunk into a smallbin  */
688
#define insert_small_chunk(M, P, S) {\
689
  bindex_t I = small_index(S);\
690
  mchunkptr B = smallbin_at(M, I);\
691
  mchunkptr F = B;\
692
  if (!smallmap_is_marked(M, I))\
693
    mark_smallmap(M, I);\
694
  else\
695
    F = B->fd;\
696
  B->fd = P;\
697
  F->bk = P;\
698
  P->fd = F;\
699
  P->bk = B;\
700
}
701

702
/* Unlink a chunk from a smallbin  */
703
#define unlink_small_chunk(M, P, S) {\
704
  mchunkptr F = P->fd;\
705
  mchunkptr B = P->bk;\
706
  bindex_t I = small_index(S);\
707
  if (F == B) {\
708
    clear_smallmap(M, I);\
709
  } else {\
710
    F->bk = B;\
711
    B->fd = F;\
712
  }\
713
}
714

715
/* Unlink the first chunk from a smallbin */
716
#define unlink_first_small_chunk(M, B, P, I) {\
717
  mchunkptr F = P->fd;\
718
  if (B == F) {\
719
    clear_smallmap(M, I);\
720
  } else {\
721
    B->fd = F;\
722
    F->bk = B;\
723
  }\
724
}
725

726
/* Replace dv node, binning the old one */
727
/* Used only when dvsize known to be small */
728
#define replace_dv(M, P, S) {\
729
  size_t DVS = M->dvsize;\
730
  if (DVS != 0) {\
731
    mchunkptr DV = M->dv;\
732
    insert_small_chunk(M, DV, DVS);\
733
  }\
734
  M->dvsize = S;\
735
  M->dv = P;\
736
}
737

738
/* ------------------------- Operations on trees ------------------------- */
739

740
/* Insert chunk into tree */
741
#define insert_large_chunk(M, X, S) {\
742
  tbinptr *H;\
743
  bindex_t I;\
744
  compute_tree_index(S, I);\
745
  H = treebin_at(M, I);\
746
  X->index = I;\
747
  X->child[0] = X->child[1] = 0;\
748
  if (!treemap_is_marked(M, I)) {\
749
    mark_treemap(M, I);\
750
    *H = X;\
751
    X->parent = (tchunkptr)H;\
752
    X->fd = X->bk = X;\
753
  } else {\
754
    tchunkptr T = *H;\
755
    size_t K = S << leftshift_for_tree_index(I);\
756
    for (;;) {\
757
      if (chunksize(T) != S) {\
758
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
759
        K <<= 1;\
760
        if (*C != 0) {\
761
          T = *C;\
762
        } else {\
763
          *C = X;\
764
          X->parent = T;\
765
          X->fd = X->bk = X;\
766
          break;\
767
        }\
768
      } else {\
769
        tchunkptr F = T->fd;\
770
        T->fd = F->bk = X;\
771
        X->fd = F;\
772
        X->bk = T;\
773
        X->parent = 0;\
774
        break;\
775
      }\
776
    }\
777
  }\
778
}
779

780
#define unlink_large_chunk(M, X) {\
781
  tchunkptr XP = X->parent;\
782
  tchunkptr R;\
783
  if (X->bk != X) {\
784
    tchunkptr F = X->fd;\
785
    R = X->bk;\
786
    F->bk = R;\
787
    R->fd = F;\
788
  } else {\
789
    tchunkptr *RP;\
790
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
791
        ((R = *(RP = &(X->child[0]))) != 0)) {\
792
      tchunkptr *CP;\
793
      while ((*(CP = &(R->child[1])) != 0) ||\
794
             (*(CP = &(R->child[0])) != 0)) {\
795
        R = *(RP = CP);\
796
      }\
797
      *RP = 0;\
798
    }\
799
  }\
800
  if (XP != 0) {\
801
    tbinptr *H = treebin_at(M, X->index);\
802
    if (X == *H) {\
803
      if ((*H = R) == 0) \
804
        clear_treemap(M, X->index);\
805
    } else {\
806
      if (XP->child[0] == X) \
807
        XP->child[0] = R;\
808
      else \
809
        XP->child[1] = R;\
810
    }\
811
    if (R != 0) {\
812
      tchunkptr C0, C1;\
813
      R->parent = XP;\
814
      if ((C0 = X->child[0]) != 0) {\
815
        R->child[0] = C0;\
816
        C0->parent = R;\
817
      }\
818
      if ((C1 = X->child[1]) != 0) {\
819
        R->child[1] = C1;\
820
        C1->parent = R;\
821
      }\
822
    }\
823
  }\
824
}
825

826
/* Relays to large vs small bin operations */
827

828
#define insert_chunk(M, P, S)\
829
  if (is_small(S)) { insert_small_chunk(M, P, S)\
830
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
831

832
#define unlink_chunk(M, P, S)\
833
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
834
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
835

836
/* -----------------------  Direct-mmapping chunks ----------------------- */
837

838
static void *direct_alloc(size_t nb)
2,997✔
839
{
840
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2,997✔
841
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
2,997✔
842
    char *mm = (char *)(DIRECT_MMAP(mmsize));
2,997✔
843
    if (mm != CMFAIL) {
2,997✔
844
      size_t offset = align_offset(chunk2mem(mm));
2,997✔
845
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
2,997✔
846
      mchunkptr p = (mchunkptr)(mm + offset);
2,997✔
847
      p->prev_foot = offset | IS_DIRECT_BIT;
2,997✔
848
      p->head = psize|CINUSE_BIT;
2,997✔
849
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2,997✔
850
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2,997✔
851
      return chunk2mem(p);
2,997✔
852
    }
853
  }
854
  return NULL;
855
}
856

857
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
173✔
858
{
859
  size_t oldsize = chunksize(oldp);
173✔
860
  if (is_small(nb)) /* Can't shrink direct regions below small size */
173✔
861
    return NULL;
862
  /* Keep old chunk if big enough but not too big */
863
  if (oldsize >= nb + SIZE_T_SIZE &&
170✔
864
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
110✔
865
    return oldp;
866
  } else {
867
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
95✔
868
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
95✔
869
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
95✔
870
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
95✔
871
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
872
    if (cp != CMFAIL) {
95✔
873
      mchunkptr newp = (mchunkptr)(cp + offset);
95✔
874
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
95✔
875
      newp->head = psize|CINUSE_BIT;
95✔
876
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
95✔
877
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
95✔
878
      return newp;
95✔
879
    }
880
  }
881
  return NULL;
882
}
883

884
/* -------------------------- mspace management -------------------------- */
885

886
/* Initialize top chunk and its size */
887
static void init_top(mstate m, mchunkptr p, size_t psize)
534✔
888
{
889
  /* Ensure alignment */
890
  size_t offset = align_offset(chunk2mem(p));
×
891
  p = (mchunkptr)((char *)p + offset);
534✔
892
  psize -= offset;
534✔
893

894
  m->top = p;
534✔
895
  m->topsize = psize;
534✔
896
  p->head = psize | PINUSE_BIT;
534✔
897
  /* set size of fake trailing chunk holding overhead space only once */
898
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
534✔
899
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
534✔
900
}
140✔
901

902
/* Initialize bins for a new mstate that is otherwise zeroed out */
903
static void init_bins(mstate m)
342✔
904
{
905
  /* Establish circular links for smallbins */
906
  bindex_t i;
342✔
907
  for (i = 0; i < NSMALLBINS; i++) {
11,286✔
908
    sbinptr bin = smallbin_at(m,i);
10,944✔
909
    bin->fd = bin->bk = bin;
10,944✔
910
  }
911
}
912

913
/* Allocate chunk and prepend remainder with chunk in successor base. */
914
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
34,041✔
915
{
916
  mchunkptr p = align_as_chunk(newbase);
34,041✔
917
  mchunkptr oldfirst = align_as_chunk(oldbase);
34,041✔
918
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
34,041✔
919
  mchunkptr q = chunk_plus_offset(p, nb);
34,041✔
920
  size_t qsize = psize - nb;
34,041✔
921
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
34,041✔
922

923
  /* consolidate remainder with first chunk of old base */
924
  if (oldfirst == m->top) {
34,041✔
925
    size_t tsize = m->topsize += qsize;
×
926
    m->top = q;
×
927
    q->head = tsize | PINUSE_BIT;
×
928
  } else if (oldfirst == m->dv) {
34,041✔
929
    size_t dsize = m->dvsize += qsize;
×
930
    m->dv = q;
×
931
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
932
  } else {
933
    if (!cinuse(oldfirst)) {
34,041✔
934
      size_t nsize = chunksize(oldfirst);
13✔
935
      unlink_chunk(m, oldfirst, nsize);
15✔
936
      oldfirst = chunk_plus_offset(oldfirst, nsize);
13✔
937
      qsize += nsize;
13✔
938
    }
939
    set_free_with_pinuse(q, qsize, oldfirst);
34,041✔
940
    insert_chunk(m, q, qsize);
34,041✔
941
  }
942

943
  return chunk2mem(p);
34,041✔
944
}
945

946
/* Add a segment to hold a new noncontiguous region */
947
static void add_segment(mstate m, char *tbase, size_t tsize)
52✔
948
{
949
  /* Determine locations and sizes of segment, fenceposts, old top */
950
  char *old_top = (char *)m->top;
52✔
951
  msegmentptr oldsp = segment_holding(m, old_top);
52✔
952
  char *old_end = oldsp->base + oldsp->size;
52✔
953
  size_t ssize = pad_request(sizeof(struct malloc_segment));
52✔
954
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
52✔
955
  size_t offset = align_offset(chunk2mem(rawsp));
52✔
956
  char *asp = rawsp + offset;
52✔
957
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
52✔
958
  mchunkptr sp = (mchunkptr)csp;
52✔
959
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
52✔
960
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
52✔
961
  mchunkptr p = tnext;
52✔
962

963
  /* reset top to new space */
964
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
52✔
965

966
  /* Set up segment record */
967
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
52✔
968
  *ss = m->seg; /* Push current record */
52✔
969
  m->seg.base = tbase;
52✔
970
  m->seg.size = tsize;
52✔
971
  m->seg.next = ss;
52✔
972

973
  /* Insert trailing fenceposts */
974
  for (;;) {
169✔
975
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
169✔
976
    p->head = FENCEPOST_HEAD;
169✔
977
    if ((char *)(&(nextp->head)) < old_end)
169✔
978
      p = nextp;
979
    else
980
      break;
981
  }
982

983
  /* Insert the rest of old top into a bin as an ordinary free chunk */
984
  if (csp != old_top) {
52✔
985
    mchunkptr q = (mchunkptr)old_top;
47✔
986
    size_t psize = (size_t)(csp - old_top);
47✔
987
    mchunkptr tn = chunk_plus_offset(q, psize);
47✔
988
    set_free_with_pinuse(q, psize, tn);
47✔
989
    insert_chunk(m, q, psize);
56✔
990
  }
991
}
52✔
992

993
/* -------------------------- System allocation -------------------------- */
994

995
static void *alloc_sys(mstate m, size_t nb)
37,211✔
996
{
997
  char *tbase = CMFAIL;
37,211✔
998
  size_t tsize = 0;
37,211✔
999

1000
  /* Directly map large chunks */
1001
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
37,211✔
1002
    void *mem = direct_alloc(nb);
2,997✔
1003
    if (mem != 0)
2,997✔
1004
      return mem;
1005
  }
1006

1007
  {
1008
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
34,214✔
1009
    size_t rsize = granularity_align(req);
34,214✔
1010
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
34,214✔
1011
      char *mp = (char *)(CALL_MMAP(rsize));
34,214✔
1012
      if (mp != CMFAIL) {
34,214✔
1013
        tbase = mp;
34,214✔
1014
        tsize = rsize;
34,214✔
1015
      }
1016
    }
1017
  }
1018

1019
  if (tbase != CMFAIL) {
34,214✔
1020
    msegmentptr sp = &m->seg;
34,214✔
1021
    /* Try to merge with an existing segment */
1022
    while (sp != 0 && tbase != sp->base + sp->size)
280,542✔
1023
      sp = sp->next;
246,328✔
1024
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
34,214✔
1025
      sp->size += tsize;
121✔
1026
      init_top(m, m->top, m->topsize + tsize);
121✔
1027
    } else {
1028
      sp = &m->seg;
1029
      while (sp != 0 && sp->base != tbase + tsize)
46,691✔
1030
        sp = sp->next;
12,598✔
1031
      if (sp != 0) {
34,093✔
1032
        char *oldbase = sp->base;
34,041✔
1033
        sp->base = tbase;
34,041✔
1034
        sp->size += tsize;
34,041✔
1035
        return prepend_alloc(m, tbase, oldbase, nb);
34,041✔
1036
      } else {
1037
        add_segment(m, tbase, tsize);
52✔
1038
      }
1039
    }
1040

1041
    if (nb < m->topsize) { /* Allocate from new or extended top space */
173✔
1042
      size_t rsize = m->topsize -= nb;
173✔
1043
      mchunkptr p = m->top;
173✔
1044
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
173✔
1045
      r->head = rsize | PINUSE_BIT;
173✔
1046
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
173✔
1047
      return chunk2mem(p);
173✔
1048
    }
1049
  }
1050

1051
  return NULL;
1052
}
1053

1054
/* -----------------------  system deallocation -------------------------- */
1055

1056
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1057
static size_t release_unused_segments(mstate m)
543,615✔
1058
{
1059
  size_t released = 0;
543,615✔
1060
  size_t nsegs = 0;
543,615✔
1061
  msegmentptr pred = &m->seg;
543,615✔
1062
  msegmentptr sp = pred->next;
543,615✔
1063
  while (sp != 0) {
4,420,444✔
1064
    char *base = sp->base;
3,876,829✔
1065
    size_t size = sp->size;
3,876,829✔
1066
    msegmentptr next = sp->next;
3,876,829✔
1067
    nsegs++;
3,876,829✔
1068
    {
1069
      mchunkptr p = align_as_chunk(base);
3,876,829✔
1070
      size_t psize = chunksize(p);
3,876,829✔
1071
      /* Can unmap if first chunk holds entire segment and not pinned */
1072
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
3,876,829✔
1073
        tchunkptr tp = (tchunkptr)p;
19✔
1074
        if (p == m->dv) {
19✔
UNCOV
1075
          m->dv = 0;
×
UNCOV
1076
          m->dvsize = 0;
×
1077
        } else {
1078
          unlink_large_chunk(m, tp);
20✔
1079
        }
1080
        if (CALL_MUNMAP(base, size) == 0) {
19✔
1081
          released += size;
19✔
1082
          /* unlink obsoleted record */
1083
          sp = pred;
19✔
1084
          sp->next = next;
19✔
1085
        } else { /* back out if cannot unmap */
1086
          insert_large_chunk(m, tp, psize);
×
1087
        }
1088
      }
1089
    }
1090
    pred = sp;
1091
    sp = next;
1092
  }
1093
  /* Reset check counter */
1094
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
543,615✔
1095
                      nsegs : MAX_RELEASE_CHECK_RATE;
543,615✔
1096
  return released;
543,615✔
1097
}
1098

1099
static int alloc_trim(mstate m, size_t pad)
19✔
1100
{
1101
  size_t released = 0;
19✔
1102
  if (pad < MAX_REQUEST && is_initialized(m)) {
19✔
1103
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
19✔
1104

1105
    if (m->topsize > pad) {
19✔
1106
      /* Shrink top space in granularity-size units, keeping at least one */
1107
      size_t unit = DEFAULT_GRANULARITY;
19✔
1108
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
19✔
1109
                      SIZE_T_ONE) * unit;
1110
      msegmentptr sp = segment_holding(m, (char *)m->top);
19✔
1111

1112
      if (sp->size >= extra &&
19✔
1113
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
38✔
1114
        size_t newsize = sp->size - extra;
19✔
1115
        /* Prefer mremap, fall back to munmap */
1116
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
19✔
1117
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
×
1118
          released = extra;
19✔
1119
        }
1120
      }
1121

1122
      if (released != 0) {
19✔
1123
        sp->size -= released;
19✔
1124
        init_top(m, m->top, m->topsize - released);
19✔
1125
      }
1126
    }
1127

1128
    /* Unmap any unused mmapped segments */
1129
    released += release_unused_segments(m);
19✔
1130

1131
    /* On failure, disable autotrim to avoid repeated failed future calls */
1132
    if (released == 0 && m->topsize > m->trim_check)
19✔
1133
      m->trim_check = MAX_SIZE_T;
×
1134
  }
1135

1136
  return (released != 0)? 1 : 0;
19✔
1137
}
1138

1139
/* ---------------------------- malloc support --------------------------- */
1140

1141
/* allocate a large request from the best fitting chunk in a treebin */
1142
static void *tmalloc_large(mstate m, size_t nb)
290,612✔
1143
{
1144
  tchunkptr v = 0;
290,612✔
1145
  size_t rsize = ~nb+1; /* Unsigned negation */
290,612✔
1146
  tchunkptr t;
290,612✔
1147
  bindex_t idx;
290,612✔
1148
  compute_tree_index(nb, idx);
290,612✔
1149

1150
  if ((t = *treebin_at(m, idx)) != 0) {
290,612✔
1151
    /* Traverse tree for this bin looking for node with size == nb */
1152
    size_t sizebits = nb << leftshift_for_tree_index(idx);
122,723✔
1153
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
122,723✔
1154
    for (;;) {
253,343✔
1155
      tchunkptr rt;
188,033✔
1156
      size_t trem = chunksize(t) - nb;
188,033✔
1157
      if (trem < rsize) {
188,033✔
1158
        v = t;
106,057✔
1159
        if ((rsize = trem) == 0)
106,057✔
1160
          break;
1161
      }
1162
      rt = t->child[1];
166,129✔
1163
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
166,129✔
1164
      if (rt != 0 && rt != t)
166,129✔
1165
        rst = rt;
53,623✔
1166
      if (t == 0) {
166,129✔
1167
        t = rst; /* set t to least subtree holding sizes > nb */
1168
        break;
1169
      }
1170
      sizebits <<= 1;
65,310✔
1171
    }
1172
  }
1173

1174
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
290,612✔
1175
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
223,101✔
1176
    if (leftbits != 0)
223,101✔
1177
      t = *treebin_at(m, lj_ffs(leftbits));
213,040✔
1178
  }
1179

1180
  while (t != 0) { /* find smallest of tree or subtree */
668,849✔
1181
    size_t trem = chunksize(t) - nb;
378,237✔
1182
    if (trem < rsize) {
378,237✔
1183
      rsize = trem;
291,019✔
1184
      v = t;
291,019✔
1185
    }
1186
    t = leftmost_child(t);
378,237✔
1187
  }
1188

1189
  /*  If dv is a better fit, return NULL so malloc will use it */
1190
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
290,612✔
1191
    mchunkptr r = chunk_plus_offset(v, nb);
239,043✔
1192
    unlink_large_chunk(m, v);
254,093✔
1193
    if (rsize < MIN_CHUNK_SIZE) {
239,043✔
1194
      set_inuse_and_pinuse(m, v, (rsize + nb));
35,870✔
1195
    } else {
1196
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
203,173✔
1197
      set_size_and_pinuse_of_free_chunk(r, rsize);
203,173✔
1198
      insert_chunk(m, r, rsize);
233,069✔
1199
    }
1200
    return chunk2mem(v);
239,043✔
1201
  }
1202
  return NULL;
1203
}
1204

1205
/* allocate a small request from the best fitting chunk in a treebin */
1206
static void *tmalloc_small(mstate m, size_t nb)
201,894✔
1207
{
1208
  tchunkptr t, v;
201,894✔
1209
  mchunkptr r;
201,894✔
1210
  size_t rsize;
201,894✔
1211
  bindex_t i = lj_ffs(m->treemap);
201,894✔
1212

1213
  v = t = *treebin_at(m, i);
201,894✔
1214
  rsize = chunksize(t) - nb;
201,894✔
1215

1216
  while ((t = leftmost_child(t)) != 0) {
430,973✔
1217
    size_t trem = chunksize(t) - nb;
229,079✔
1218
    if (trem < rsize) {
229,079✔
1219
      rsize = trem;
121,225✔
1220
      v = t;
121,225✔
1221
    }
1222
  }
1223

1224
  r = chunk_plus_offset(v, nb);
201,894✔
1225
  unlink_large_chunk(m, v);
247,766✔
1226
  if (rsize < MIN_CHUNK_SIZE) {
201,894✔
1227
    set_inuse_and_pinuse(m, v, (rsize + nb));
28✔
1228
  } else {
1229
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
201,866✔
1230
    set_size_and_pinuse_of_free_chunk(r, rsize);
201,866✔
1231
    replace_dv(m, r, rsize);
201,866✔
1232
  }
1233
  return chunk2mem(v);
201,894✔
1234
}
1235

1236
/* ----------------------------------------------------------------------- */
1237

1238
void *lj_alloc_create(void)
342✔
1239
{
1240
  size_t tsize = DEFAULT_GRANULARITY;
342✔
1241
  char *tbase;
342✔
1242
  INIT_MMAP();
342✔
1243
  tbase = (char *)(CALL_MMAP(tsize));
342✔
1244
  if (tbase != CMFAIL) {
342✔
1245
    size_t msize = pad_request(sizeof(struct malloc_state));
342✔
1246
    mchunkptr mn;
342✔
1247
    mchunkptr msp = align_as_chunk(tbase);
342✔
1248
    mstate m = (mstate)(chunk2mem(msp));
342✔
1249
    memset(m, 0, msize);
342✔
1250
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
342✔
1251
    m->seg.base = tbase;
342✔
1252
    m->seg.size = tsize;
342✔
1253
    m->release_checks = MAX_RELEASE_CHECK_RATE;
342✔
1254
    init_bins(m);
342✔
1255
    mn = next_chunk(mem2chunk(m));
342✔
1256
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
342✔
1257
    return m;
342✔
1258
  }
1259
  return NULL;
1260
}
1261

1262
void lj_alloc_destroy(void *msp)
332✔
1263
{
1264
  mstate ms = (mstate)msp;
332✔
1265
  msegmentptr sp = &ms->seg;
332✔
1266
  while (sp != 0) {
697✔
1267
    char *base = sp->base;
365✔
1268
    size_t size = sp->size;
365✔
1269
    sp = sp->next;
365✔
1270
    CALL_MUNMAP(base, size);
365✔
1271
  }
1272
}
332✔
1273

1274
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
144,512,055✔
1275
{
1276
  mstate ms = (mstate)msp;
144,512,055✔
1277
  void *mem;
144,512,055✔
1278
  size_t nb;
144,512,055✔
1279
  if (nsize <= MAX_SMALL_REQUEST) {
144,512,055✔
1280
    bindex_t idx;
144,188,774✔
1281
    binmap_t smallbits;
144,188,774✔
1282
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
144,188,774✔
1283
    idx = small_index(nb);
144,188,774✔
1284
    smallbits = ms->smallmap >> idx;
144,188,774✔
1285

1286
    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
144,188,774✔
1287
      mchunkptr b, p;
472,510✔
1288
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
472,510✔
1289
      b = smallbin_at(ms, idx);
472,510✔
1290
      p = b->fd;
472,510✔
1291
      unlink_first_small_chunk(ms, b, p, idx);
472,510✔
1292
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
472,510✔
1293
      mem = chunk2mem(p);
472,510✔
1294
      return mem;
472,510✔
1295
    } else if (nb > ms->dvsize) {
143,716,264✔
1296
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
877,667✔
1297
        mchunkptr b, p, r;
134,543✔
1298
        size_t rsize;
134,543✔
1299
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
134,543✔
1300
        bindex_t i = lj_ffs(leftbits);
134,543✔
1301
        b = smallbin_at(ms, i);
134,543✔
1302
        p = b->fd;
134,543✔
1303
        unlink_first_small_chunk(ms, b, p, i);
134,543✔
1304
        rsize = small_index2size(i) - nb;
134,543✔
1305
        /* Fit here cannot be remainderless if 4byte sizes */
1306
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
134,543✔
1307
          set_inuse_and_pinuse(ms, p, small_index2size(i));
25,927✔
1308
        } else {
1309
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
108,616✔
1310
          r = chunk_plus_offset(p, nb);
108,616✔
1311
          set_size_and_pinuse_of_free_chunk(r, rsize);
108,616✔
1312
          replace_dv(ms, r, rsize);
108,616✔
1313
        }
1314
        mem = chunk2mem(p);
134,543✔
1315
        return mem;
134,543✔
1316
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
743,124✔
1317
        return mem;
1318
      }
1319
    }
1320
  } else if (nsize >= MAX_REQUEST) {
323,281✔
1321
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
1322
  } else {
1323
    nb = pad_request(nsize);
323,281✔
1324
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
323,281✔
1325
      return mem;
1326
    }
1327
  }
1328

1329
  if (nb <= ms->dvsize) {
143,464,065✔
1330
    size_t rsize = ms->dvsize - nb;
142,895,083✔
1331
    mchunkptr p = ms->dv;
142,895,083✔
1332
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
142,895,083✔
1333
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
142,699,993✔
1334
      ms->dvsize = rsize;
142,699,993✔
1335
      set_size_and_pinuse_of_free_chunk(r, rsize);
142,699,993✔
1336
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
142,699,993✔
1337
    } else { /* exhaust dv */
1338
      size_t dvs = ms->dvsize;
195,090✔
1339
      ms->dvsize = 0;
195,090✔
1340
      ms->dv = 0;
195,090✔
1341
      set_inuse_and_pinuse(ms, p, dvs);
195,090✔
1342
    }
1343
    mem = chunk2mem(p);
142,895,083✔
1344
    return mem;
142,895,083✔
1345
  } else if (nb < ms->topsize) { /* Split top */
568,982✔
1346
    size_t rsize = ms->topsize -= nb;
531,771✔
1347
    mchunkptr p = ms->top;
531,771✔
1348
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
531,771✔
1349
    r->head = rsize | PINUSE_BIT;
531,771✔
1350
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
531,771✔
1351
    mem = chunk2mem(p);
531,771✔
1352
    return mem;
531,771✔
1353
  }
1354
  return alloc_sys(ms, nb);
37,211✔
1355
}
1356

1357
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
144,532,961✔
1358
{
1359
  if (ptr != 0) {
144,532,961✔
1360
    mchunkptr p = mem2chunk(ptr);
144,504,294✔
1361
    mstate fm = (mstate)msp;
144,504,294✔
1362
    size_t psize = chunksize(p);
144,504,294✔
1363
    mchunkptr next = chunk_plus_offset(p, psize);
144,504,294✔
1364
    if (!pinuse(p)) {
144,504,294✔
1365
      size_t prevsize = p->prev_foot;
2,671,027✔
1366
      if ((prevsize & IS_DIRECT_BIT) != 0) {
2,671,027✔
1367
        prevsize &= ~IS_DIRECT_BIT;
2,997✔
1368
        psize += prevsize + DIRECT_FOOT_PAD;
2,997✔
1369
        CALL_MUNMAP((char *)p - prevsize, psize);
2,997✔
1370
        return NULL;
2,997✔
1371
      } else {
1372
        mchunkptr prev = chunk_minus_offset(p, prevsize);
2,668,030✔
1373
        psize += prevsize;
2,668,030✔
1374
        p = prev;
2,668,030✔
1375
        /* consolidate backward */
1376
        if (p != fm->dv) {
2,668,030✔
1377
          unlink_chunk(fm, p, prevsize);
2,657,612✔
1378
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
81,376✔
1379
          fm->dvsize = psize;
4,444✔
1380
          set_free_with_pinuse(p, psize, next);
4,444✔
1381
          return NULL;
4,444✔
1382
        }
1383
      }
1384
    }
1385
    if (!cinuse(next)) {  /* consolidate forward */
144,496,853✔
1386
      if (next == fm->top) {
141,137,700✔
1387
        size_t tsize = fm->topsize += psize;
124,442✔
1388
        fm->top = p;
124,442✔
1389
        p->head = tsize | PINUSE_BIT;
124,442✔
1390
        if (p == fm->dv) {
124,442✔
1391
          fm->dv = 0;
248✔
1392
          fm->dvsize = 0;
248✔
1393
        }
1394
        if (tsize > fm->trim_check)
124,442✔
1395
          alloc_trim(fm, 0);
19✔
1396
        return NULL;
124,442✔
1397
      } else if (next == fm->dv) {
141,013,258✔
1398
        size_t dsize = fm->dvsize += psize;
629,086✔
1399
        fm->dv = p;
629,086✔
1400
        set_size_and_pinuse_of_free_chunk(p, dsize);
629,086✔
1401
        return NULL;
629,086✔
1402
      } else {
1403
        size_t nsize = chunksize(next);
140,384,172✔
1404
        psize += nsize;
140,384,172✔
1405
        unlink_chunk(fm, next, nsize);
140,454,976✔
1406
        set_size_and_pinuse_of_free_chunk(p, psize);
140,384,172✔
1407
        if (p == fm->dv) {
140,384,172✔
1408
          fm->dvsize = psize;
76,684✔
1409
          return NULL;
76,684✔
1410
        }
1411
      }
1412
    } else {
1413
      set_free_with_pinuse(p, psize, next);
3,359,153✔
1414
    }
1415

1416
    if (is_small(psize)) {
143,666,641✔
1417
      insert_small_chunk(fm, p, psize);
4,995,422✔
1418
    } else {
1419
      tchunkptr tp = (tchunkptr)p;
138,671,219✔
1420
      insert_large_chunk(fm, tp, psize);
143,322,114✔
1421
      if (--fm->release_checks == 0)
138,671,219✔
1422
        release_unused_segments(fm);
543,596✔
1423
    }
1424
  }
1425
  return NULL;
1426
}
1427

1428
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
124,965✔
1429
{
1430
  if (nsize >= MAX_REQUEST) {
124,965✔
1431
    return NULL;
1432
  } else {
1433
    mstate m = (mstate)msp;
124,965✔
1434
    mchunkptr oldp = mem2chunk(ptr);
124,965✔
1435
    size_t oldsize = chunksize(oldp);
124,965✔
1436
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
124,965✔
1437
    mchunkptr newp = 0;
124,965✔
1438
    size_t nb = request2size(nsize);
124,965✔
1439

1440
    /* Try to either shrink or extend into top. Else malloc-copy-free */
1441
    if (is_direct(oldp)) {
124,965✔
1442
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
173✔
1443
    } else if (oldsize >= nb) { /* already big enough */
124,792✔
1444
      size_t rsize = oldsize - nb;
662✔
1445
      newp = oldp;
662✔
1446
      if (rsize >= MIN_CHUNK_SIZE) {
662✔
1447
        mchunkptr rem = chunk_plus_offset(newp, nb);
653✔
1448
        set_inuse(m, newp, nb);
653✔
1449
        set_inuse(m, rem, rsize);
653✔
1450
        lj_alloc_free(m, chunk2mem(rem));
653✔
1451
      }
1452
    } else if (next == m->top && oldsize + m->topsize > nb) {
124,130✔
1453
      /* Expand into top */
1454
      size_t newsize = oldsize + m->topsize;
466✔
1455
      size_t newtopsize = newsize - nb;
466✔
1456
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
466✔
1457
      set_inuse(m, oldp, nb);
466✔
1458
      newtop->head = newtopsize |PINUSE_BIT;
466✔
1459
      m->top = newtop;
466✔
1460
      m->topsize = newtopsize;
466✔
1461
      newp = oldp;
466✔
1462
    }
1463

1464
    if (newp != 0) {
1,301✔
1465
      return chunk2mem(newp);
1,298✔
1466
    } else {
1467
      void *newmem = lj_alloc_malloc(m, nsize);
123,667✔
1468
      if (newmem != 0) {
123,667✔
1469
        size_t oc = oldsize - overhead_for(oldp);
123,667✔
1470
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
123,667✔
1471
        lj_alloc_free(m, ptr);
123,667✔
1472
      }
1473
      return newmem;
123,667✔
1474
    }
1475
  }
1476
}
1477

1478
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
288,921,994✔
1479
{
1480
  (void)osize;
288,921,994✔
1481
  if (nsize == 0) {
288,921,994✔
1482
    return lj_alloc_free(msp, ptr);
144,408,641✔
1483
  } else if (ptr == NULL) {
144,513,353✔
1484
    return lj_alloc_malloc(msp, nsize);
144,388,388✔
1485
  } else {
1486
    return lj_alloc_realloc(msp, ptr, nsize);
124,965✔
1487
  }
1488
}
1489

1490
#endif