tarantool / luajit / build 24880518757 (push, via GitHub)

24 Apr 2026 08:40AM UTC coverage: 93.046% (-0.005%) from 93.051%
mandesero
asan: fix internal allocator hardening

Fix several issues in the hardened internal allocator:
* guard ASAN redzone/alignment size arithmetic against overflow;
* avoid treating tail munmap addresses as full ASAN mmap allocations;
* route freed blocks through a bounded quarantine instead of leaking all
  allocations forever;
* free old realloc blocks through the same quarantine path.

Add allocator hardening tests for oversized allocation handling and
quarantine release behaviour.
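
Below is a minimal, standalone C sketch of the bounded-quarantine scheme the
message describes (the names, the limits, and the real_free() stand-in are
illustrative, not identifiers from the patch): freed blocks enter a
fixed-capacity FIFO ring with a byte budget, and pushing a new block first
evicts the oldest entries until both limits hold, so frees are delayed long
enough to catch use-after-free without retaining memory forever.

#include <stddef.h>
#include <string.h>

/* Illustrative limits; the patch uses its own constants. */
#define QUAR_SLOTS     8
#define QUAR_MAX_BYTES 4096

typedef struct { void *ptr; size_t size; } quar_entry;

static quar_entry quar[QUAR_SLOTS];
static size_t quar_head, quar_count, quar_bytes;

/* Stand-in for the allocator's real release path. */
static void real_free(void *ptr) { (void)ptr; }

/* Actually free the oldest quarantined block. */
static void quar_release_one(void)
{
  quar_entry *e = &quar[quar_head];
  quar_head = (quar_head + 1) % QUAR_SLOTS;
  quar_count--;
  quar_bytes -= e->size;
  real_free(e->ptr);
  memset(e, 0, sizeof(*e));
}

/* Delay a free; evict oldest entries until both budgets hold. */
static void quar_push(void *ptr, size_t size)
{
  if (size > QUAR_MAX_BYTES) {  /* Too big to retain: free immediately. */
    real_free(ptr);
    return;
  }
  while (quar_count == QUAR_SLOTS || quar_bytes + size > QUAR_MAX_BYTES)
    quar_release_one();
  quar_entry *e = &quar[(quar_head + quar_count) % QUAR_SLOTS];
  e->ptr = ptr;
  e->size = size;
  quar_count++;
  quar_bytes += size;
}

The patch's version additionally records the owning mspace in each entry, so
a dying allocator state can drain only the blocks that belong to it.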

5712 of 6046 branches covered (94.48%)

Branch coverage is included in the aggregate %: (21799 + 5712) / (23521 + 6046) = 27511 / 29567 ≈ 93.046%.

5 of 9 new or added lines in 1 file covered (55.56%)

2 existing lines in 1 file are now uncovered.

21799 of 23521 relevant lines covered (92.68%)

3840128.72 hits per line

Source file: /src/lj_alloc.c (90.43% covered)

/*
** Bundled memory allocator.
**
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
** The original bears the following remark:
**
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
**   Doug Lea and released to the public domain, as explained at
**   http://creativecommons.org/licenses/publicdomain.
**
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
**
** No additional copyright is claimed over the customizations.
** Please do NOT bother the original author about this version here!
**
** If you want to use dlmalloc in another project, you should get
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
** For thread-safe derivatives, take a look at:
** - ptmalloc: http://www.malloc.de/
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
*/

#define lj_alloc_c
#define LUA_CORE

/* To get the mremap prototype. Must be defined before any system includes. */
#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif

#include "lj_def.h"
#include "lj_arch.h"
#include "lj_alloc.h"

#ifndef LUAJIT_USE_SYSMALLOC

#define MAX_SIZE_T                (~(size_t)0)
#define MALLOC_ALIGNMENT        ((size_t)8U)

#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
#define MAX_RELEASE_CHECK_RATE        255

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE                (sizeof(size_t))
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO                ((size_t)0)
#define SIZE_T_ONE                ((size_t)1)
#define SIZE_T_TWO                ((size_t)2)
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

/* -------------------------- MMAP support ------------------------------- */

#define MFAIL                        ((void *)(MAX_SIZE_T))
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */

#define IS_DIRECT_BIT                (SIZE_T_ONE)


/* Determine system-specific block allocation method. */
#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define LJ_ALLOC_VIRTUALALLOC        1

#if LJ_64 && !LJ_GC64
#define LJ_ALLOC_NTAVM                1
#endif

#else

#include <errno.h>
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
#include <sys/mman.h>

#define LJ_ALLOC_MMAP                1

#if LJ_64

#define LJ_ALLOC_MMAP_PROBE        1

#if LJ_GC64
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
#elif LJ_TARGET_X64 && LJ_HASJIT
/* Due to limitations in the x64 compiler backend. */
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
#else
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
#endif

#endif

#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
#define LJ_ALLOC_MMAP32                1
#endif

#if LJ_TARGET_LINUX
#define LJ_ALLOC_MREMAP                1
#endif

#endif


#if LJ_ALLOC_VIRTUALALLOC

#if LJ_ALLOC_NTAVM
/* Undocumented, but hey, that's what we all love so much about Windows. */
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
                       size_t *size, ULONG alloctype, ULONG prot);
static PNTAVM ntavm;

/* Number of top bits of the lower 32 bits of an address that must be zero.
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
*/
#define NTAVM_ZEROBITS                1

static void init_mmap(void)
{
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
                                 "NtAllocateVirtualMemory");
}
#define INIT_MMAP()        init_mmap()

/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
static void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}

/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = NULL;
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
  SetLastError(olderr);
  return st == 0 ? ptr : MFAIL;
}

#else

/* Win32 MMAP via VirtualAlloc */
static void *CALL_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}

/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
static void *DIRECT_MMAP(size_t size)
{
  DWORD olderr = GetLastError();
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                            PAGE_READWRITE);
  SetLastError(olderr);
  return ptr ? ptr : MFAIL;
}

#endif

/* This function supports releasing coalesced segments */
static int CALL_MUNMAP(void *ptr, size_t size)
{
  DWORD olderr = GetLastError();
  MEMORY_BASIC_INFORMATION minfo;
  char *cptr = (char *)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  SetLastError(olderr);
  return 0;
}

#elif LJ_ALLOC_MMAP

#define MMAP_PROT                (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS                MAP_ANON
#endif
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)

#if LJ_ALLOC_MMAP_PROBE

#ifdef MAP_TRYFIXED
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
#else
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
#endif

#define LJ_ALLOC_MMAP_PROBE_MAX                30
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5

#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)

#if LUAJIT_USE_ASAN_HARDENING

/*
** ASan (AddressSanitizer) detects memory errors during program execution.
** One way it achieves this is by adding redzones around memory allocations. A redzone is a
** specially reserved area of memory before and after the allocated block, which is filled
** with a unique value. If the program accesses memory outside of the allocation,
** ASan detects the attempt and reports an error, allowing the developer to
** catch and fix the issue early.
**
** - Original paper: https://www.usenix.org/system/files/conference/atc12/atc12-final39.pdf
**
** LuaJIT ASAN instrumentation (mmap and others):
**
** - Memory map around allocation:
** -------------------------------------------------------------------------------------
** .. .. | [f7]    ...    [f7] | [00]     ...     [0(0-7)] | [f7]    ...    [f7] | .. ..
**       |    left redzone     |           data            |    right redzone    |
**       |  REDZONE_SIZE bytes |          N bytes          |  REDZONE_SIZE bytes |
** -------------------------------------------------------------------------------------
**
** left redzone:
**  The first SIZE_T_SIZE bytes of the redzone contain the data size N, the next SIZE_T_SIZE bytes
**  of the redzone contain the full size of the allocation, including the alignment of the size N
**  and the size of the redzones themselves.
*/
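
/*
** Worked example, assuming a 64-bit size_t: REDZONE_SIZE = FOUR_SIZE_T_SIZES
** = 32 bytes and TOTAL_REDZONE_SIZE = 64 bytes. A malloc request of N = 13
** bytes is rounded up to 16 (SIZE_ALIGNMENT = 8), so the poisoned span
** computed by asan_malloc_sizes() below is 16 + 64 = 80 bytes: a 32-byte
** left redzone stashing N and the full size, 16 bytes of user data, and a
** 32-byte right redzone.
*/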

#include <sanitizer/asan_interface.h>

/**
 *
 * Memory map for 64-bit (shift = 3)
 * The shadow address is calculated by (Mem >> shift) + 0x7fff8000
 *
 * [0x10007fff8000, 0x7fffffffffff]        HighMem
 * [0x02008fff7000, 0x10007fff7fff]        HighShadow
 * [0x00008fff7000, 0x02008fff6fff]        ShadowGap
 * [0x00007fff8000, 0x00008fff6fff]        LowShadow
 * [0x000000000000, 0x00007fff7fff]        LowMem
 *
 */
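
/*
** Worked example for the table above: for Mem = 0x10007fff8000 (the lower
** bound of HighMem), Shadow = (0x10007fff8000 >> 3) + 0x7fff8000 =
** 0x2000ffff000 + 0x7fff8000 = 0x2008fff7000, which is exactly the start
** of HighShadow.
*/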

/* Recommended redzone size from 16 to 2048 bytes (must be a power of two)
** https://github.com/google/sanitizers/wiki/AddressSanitizerFlags
*/
#define REDZONE_SIZE FOUR_SIZE_T_SIZES

/* Total redzone size around allocation */
#define TOTAL_REDZONE_SIZE (REDZONE_SIZE << 1)

/* Multiple of the allocated memory size */
#define SIZE_ALIGNMENT MALLOC_ALIGNMENT

#define ASAN_QUARANTINE_MAX 1024
#define ASAN_QUARANTINE_MAX_BYTES (DEFAULT_GRANULARITY << 3)

/**
 * We can only use addresses from HighMem, so we must force the system allocator (mmap)
 * to return addresses starting from the lower bound of HighMem.
 */
static inline uintptr_t asan_lower_address()
{
  size_t shadow_scale;
  size_t shadow_offset;
  __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
  return (uintptr_t)(shadow_offset + (1ULL << (LJ_ALLOC_MBITS - shadow_scale)));
}

/* Round a size up to the nearest multiple of ALIGN (a power of two). */
#define ALIGN_SIZE(S, ALIGN)  ((size_t)(((S) + (ALIGN) - 1) & ~((ALIGN) - 1)))

#define alloc2mem(p)                ((void *)((char *)(p) + REDZONE_SIZE))
#define mem2alloc(mem)                ((void *)((char *)(mem) - REDZONE_SIZE))

typedef enum {
  MEM_SIZE,
  POISON_SIZE
} SizeType;

typedef struct asan_quarantine_entry {
  void *msp;
  void *ptr;
  size_t size;
} asan_quarantine_entry;

static asan_quarantine_entry asan_quarantine[ASAN_QUARANTINE_MAX];
static size_t asan_quarantine_head;
static size_t asan_quarantine_count;
static size_t asan_quarantine_bytes;

static LJ_NOINLINE void *lj_alloc_free_raw(void *msp, void *ptr);

/* Return 1 if a + b overflows size_t; the wrapped sum is stored in *r. */
static int asan_add_overflow(size_t a, size_t b, size_t *r)
{
  *r = a + b;
  return *r < a;
}

/* Round size up to a multiple of align, failing on overflow. */
static int asan_align_size(size_t size, size_t align, size_t *aligned)
{
  size_t add = align - 1;
  if (asan_add_overflow(size, add, aligned))
    return 1;
  *aligned &= ~add;
  return 0;
}

/* Compute the page-aligned mmap size including redzones, failing on overflow. */
static int asan_mmap_size(size_t msize, size_t *psize)
{
  size_t size;
  if (asan_add_overflow(msize, TOTAL_REDZONE_SIZE, &size))
    return 1;
  return asan_align_size(size, LJ_PAGESIZE, psize);
}
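
/*
** Worked example: for a request of msize = MAX_SIZE_T - 8, adding
** TOTAL_REDZONE_SIZE wraps the sum past zero, asan_add_overflow() sees
** *r < a and returns 1, and asan_mmap_size() fails instead of mapping a
** bogus, tiny region.
*/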

/* Add redzones around allocation and keep the memory size and poison size. */
void *mark_memory_region(void *ptr, size_t msize, size_t psize)
{
  if (ptr == NULL)
    return NULL;
  if (ptr == MFAIL)
    return MFAIL;

  ASAN_UNPOISON_MEMORY_REGION(ptr, TWO_SIZE_T_SIZES);
  *((size_t *)(ptr)) = msize;
  *((size_t *)(ptr) + 1) = psize;
  ASAN_POISON_MEMORY_REGION(ptr, psize);
  ptr = alloc2mem(ptr);
  ASAN_UNPOISON_MEMORY_REGION(ptr, msize);
  return ptr;
}

/* Read a stashed size (data size or full poisoned size) from the left redzone. */
size_t asan_get_size(void *ptr, SizeType type)
{
  size_t offset = (type == MEM_SIZE) ? 0 : SIZE_T_SIZE;
  char *alloc = (char *)mem2alloc(ptr);
  ASAN_UNPOISON_MEMORY_REGION(alloc + offset, SIZE_T_SIZE);
  size_t size = *((size_t *)(alloc + offset));
  ASAN_POISON_MEMORY_REGION(alloc + offset, SIZE_T_SIZE);
  return size;
}

#endif

/* No point in a giant ifdef mess. Just try to open /dev/urandom.
** It doesn't really matter if this fails, since we get some ASLR bits from
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
*/
#include <fcntl.h>
#include <unistd.h>

static uintptr_t mmap_probe_seed(void)
{
  uintptr_t val;
  int fd = open("/dev/urandom", O_RDONLY);
  if (fd != -1) {
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
    (void)close(fd);
    if (ok) return val;
  }
  return 1;  /* Punt. */
}

static void *mmap_probe(size_t size)
{
  /* Hint for next allocation. Doesn't need to be thread-safe. */
  static uintptr_t hint_addr = 0;
  static uintptr_t hint_prng = 0;
  int olderr = errno;
  int retry;
#if LUAJIT_USE_ASAN_HARDENING
  /* Save the request memory size */
  size_t msize = size;
  /* Total allocation size corresponds to the memory size and the size of redzones */
  if (asan_mmap_size(size, &size))
    return MFAIL;
#endif
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
    uintptr_t addr = (uintptr_t)p;
#if LUAJIT_USE_ASAN_HARDENING
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= asan_lower_address() &&
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
#else
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
#endif
      /* We got a suitable address. Bump the hint address. */
      hint_addr = addr + size;
      errno = olderr;
#if LUAJIT_USE_ASAN_HARDENING
      p = mark_memory_region(p, msize, size);
#endif
      return p;
    }
    if (p != MFAIL) {
      munmap(p, size);
    } else if (errno == ENOMEM) {
      return MFAIL;
    }
    if (hint_addr) {
      /* First, try linear probing. */
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
        hint_addr += 0x1000000;
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
          hint_addr = 0;
        continue;
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
        /* Next, try a no-hint probe to get back an ASLR address. */
        hint_addr = 0;
        continue;
      }
    }
    /* Finally, try pseudo-random probing. */
    if (LJ_UNLIKELY(hint_prng == 0)) {
      hint_prng = mmap_probe_seed();
    }
    /* The unsuitable address we got has some ASLR PRNG bits. */
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
    do {  /* The PRNG itself is very weak, but see above. */
      hint_prng = hint_prng * 1103515245 + 12345;
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
  }
  errno = olderr;
  return MFAIL;
}

#endif

#if LJ_ALLOC_MMAP32

#if defined(__sun__)
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
#else
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
#endif

static void *mmap_map32(size_t size)
{
#if LJ_ALLOC_MMAP_PROBE
  static int fallback = 0;
  if (fallback)
    return mmap_probe(size);
#endif
  {
    int olderr = errno;
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
    errno = olderr;
    /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
#if LJ_ALLOC_MMAP_PROBE
    if (ptr == MFAIL) {
      fallback = 1;
      return mmap_probe(size);
    }
#endif
    return ptr;
  }
}

#endif

#if LJ_ALLOC_MMAP32
#define CALL_MMAP(size)                mmap_map32(size)
#elif LJ_ALLOC_MMAP_PROBE
#define CALL_MMAP(size)                mmap_probe(size)
#else
static void *CALL_MMAP(size_t size)
{
  int olderr = errno;
#if LUAJIT_USE_ASAN_HARDENING
  size_t msize = size;
  if (asan_mmap_size(size, &size))
    return MFAIL;
#endif
#if LUAJIT_USE_ASAN_HARDENING
  void *ptr = mmap((void *)asan_lower_address(), size, MMAP_PROT, MMAP_FLAGS, -1, 0);
#else
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
#endif
  errno = olderr;
#if LUAJIT_USE_ASAN_HARDENING
  ptr = mark_memory_region(ptr, msize, size);
#endif
  return ptr;
}
#endif

#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4

#include <sys/resource.h>

static void init_mmap(void)
{
  struct rlimit rlim;
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
}
#define INIT_MMAP()        init_mmap()

#endif

static int CALL_MUNMAP(void *ptr, size_t size)
{
  int olderr = errno;
#if LUAJIT_USE_ASAN_HARDENING
  /* Check that the memory is not poisoned. */
  memmove(ptr, ptr, size);
  size = asan_get_size(ptr, POISON_SIZE);
  ptr = mem2alloc(ptr);
  ASAN_UNPOISON_MEMORY_REGION(ptr, size);
#endif
  int ret = munmap(ptr, size);
  errno = olderr;
  return ret;
}

/* Unmap only the tail of a segment. Under ASAN hardening the whole mapping
** is wrapped in redzones, so a partial unmap is refused and the caller
** keeps the segment intact.
*/
static int CALL_MUNMAP_TAIL(void *ptr, size_t size)
{
#if LUAJIT_USE_ASAN_HARDENING
  UNUSED(ptr);
  UNUSED(size);
  return -1;
#else
  int olderr = errno;
  int ret = munmap(ptr, size);
  errno = olderr;
  return ret;
#endif
}

#if LJ_ALLOC_MREMAP
/* Need to define _GNU_SOURCE to get the mremap prototype. */
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
{
  int olderr = errno;
#if LUAJIT_USE_ASAN_HARDENING && !(LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64))
  void *new_ptr = CALL_MMAP(nsz);
  if (new_ptr != MFAIL) {
    size_t oms = asan_get_size(ptr, MEM_SIZE);
    memcpy(new_ptr, ptr, oms > nsz ? nsz : oms);
    CALL_MUNMAP(ptr, osz);
    ptr = new_ptr;
  }
#else

#if LUAJIT_USE_ASAN_HARDENING
  void *old_ptr = ptr;
  size_t nms = nsz;
  osz = asan_get_size(old_ptr, POISON_SIZE);
  if (asan_mmap_size(nsz, &nsz))
    return MFAIL;
  ptr = mem2alloc(ptr);
  ASAN_UNPOISON_MEMORY_REGION(ptr, osz);
#endif
  ptr = mremap(ptr, osz, nsz, flags);
#if LUAJIT_USE_ASAN_HARDENING
  if (ptr != MFAIL) {
    ptr = mark_memory_region(ptr, nms, nsz);
  } else {
    ASAN_POISON_MEMORY_REGION(mem2alloc(old_ptr), osz);
  }
#endif
#endif
  errno = olderr;
  return ptr;
}

#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
#define CALL_MREMAP_NOMOVE        0
#define CALL_MREMAP_MAYMOVE        1
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
#else
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
#endif
#endif

#endif


#ifndef INIT_MMAP
#define INIT_MMAP()                ((void)0)
#endif

#ifndef DIRECT_MMAP
#define DIRECT_MMAP(s)                CALL_MMAP(s)
#endif

#ifndef CALL_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
#endif

/* -----------------------  Chunk representations ------------------------ */

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk *fd;         /* double links -- used only if free. */
  struct malloc_chunk *bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
typedef size_t bindex_t;               /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */

/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE                (sizeof(mchunk))

#define CHUNK_OVERHEAD                (SIZE_T_SIZE)

/* Direct chunks need a second word of overhead ... */
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#if LUAJIT_USE_ASAN_HARDENING
/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES + REDZONE_SIZE))
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES - REDZONE_SIZE))
#else
/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
#endif
/* chunk associated with aligned address A */
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

#if LUAJIT_USE_ASAN_HARDENING
/* Compute the user size and full poisoned size for a malloc request,
** failing on overflow or oversized requests.
*/
static int asan_malloc_sizes(size_t nsize, size_t *mem_size, size_t *poison_size)
{
  size_t aligned;
  if (nsize == 0)
    nsize = MIN_CHUNK_SIZE;
  if (nsize >= MAX_REQUEST)
    return 1;
  if (asan_align_size(nsize, SIZE_ALIGNMENT, &aligned) ||
      asan_add_overflow(aligned, TOTAL_REDZONE_SIZE, poison_size) ||
      *poison_size >= MAX_REQUEST)
    return 1;
  *mem_size = nsize;
  return 0;
}
#endif

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))

/* ------------------ Operations on head and foot fields ----------------- */

#define PINUSE_BIT                (SIZE_T_ONE)
#define CINUSE_BIT                (SIZE_T_TWO)
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)                ((p)->head & CINUSE_BIT)
#define pinuse(p)                ((p)->head & PINUSE_BIT)
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_direct(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

#if LUAJIT_USE_ASAN_HARDENING
/* Unpoison a chunk (including its header word) before it is handed back
** to the allocator proper.
*/
static void asan_unpoison_chunk_for_free(void *ptr)
{
  mchunkptr p = mem2chunk(ptr);
  size_t psize = chunksize(p);
  ASAN_UNPOISON_MEMORY_REGION(p, psize + SIZE_T_SIZE);
}

/* Pop the oldest quarantined block and actually free it. */
static void asan_quarantine_release_one(void)
{
  asan_quarantine_entry *entry = &asan_quarantine[asan_quarantine_head];
  void *msp = entry->msp;
  void *ptr = entry->ptr;
  size_t size = entry->size;

  asan_quarantine_head = (asan_quarantine_head + 1) % ASAN_QUARANTINE_MAX;
  asan_quarantine_count--;
  asan_quarantine_bytes -= size;
  entry->msp = NULL;
  entry->ptr = NULL;
  entry->size = 0;

  asan_unpoison_chunk_for_free(ptr);
  lj_alloc_free_raw(msp, ptr);
}

/* Delay a free in the bounded FIFO quarantine, evicting the oldest
** entries when the slot or byte budget would be exceeded.
*/
static void asan_quarantine_push(void *msp, void *ptr, size_t size)
{
  if (size > ASAN_QUARANTINE_MAX_BYTES) {
    asan_unpoison_chunk_for_free(ptr);
    lj_alloc_free_raw(msp, ptr);
    return;
  }

  while (asan_quarantine_count == ASAN_QUARANTINE_MAX ||
         asan_quarantine_bytes + size > ASAN_QUARANTINE_MAX_BYTES)
    asan_quarantine_release_one();

  size_t idx = (asan_quarantine_head + asan_quarantine_count) %
               ASAN_QUARANTINE_MAX;
  asan_quarantine[idx].msp = msp;
  asan_quarantine[idx].ptr = ptr;
  asan_quarantine[idx].size = size;
  asan_quarantine_count++;
  asan_quarantine_bytes += size;
}

/* Flush all quarantined blocks that belong to a dying mspace, keeping
** entries owned by other mspaces.
*/
static void asan_quarantine_drain_msp(void *msp)
{
  size_t i, kept = 0;
  asan_quarantine_entry kept_entries[ASAN_QUARANTINE_MAX];

  for (i = 0; i < asan_quarantine_count; i++) {
    size_t idx = (asan_quarantine_head + i) % ASAN_QUARANTINE_MAX;
    asan_quarantine_entry entry = asan_quarantine[idx];
    if (entry.msp == msp) {
      asan_quarantine_bytes -= entry.size;
      asan_unpoison_chunk_for_free(entry.ptr);
      lj_alloc_free_raw(entry.msp, entry.ptr);
    } else {
      kept_entries[kept++] = entry;
    }
  }

  memset(asan_quarantine, 0, sizeof(asan_quarantine));
  for (i = 0; i < kept; i++)
    asan_quarantine[i] = kept_entries[i];
  asan_quarantine_head = 0;
  asan_quarantine_count = kept;
}
#endif
813

814
/* ---------------------- Overlaid data structures ----------------------- */
815

816
struct malloc_tree_chunk {
817
  /* The first four fields must be compatible with malloc_chunk */
818
  size_t                    prev_foot;
819
  size_t                    head;
820
  struct malloc_tree_chunk *fd;
821
  struct malloc_tree_chunk *bk;
822

823
  struct malloc_tree_chunk *child[2];
824
  struct malloc_tree_chunk *parent;
825
  bindex_t                  index;
826
};
827

828
typedef struct malloc_tree_chunk  tchunk;
829
typedef struct malloc_tree_chunk *tchunkptr;
830
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
831

832
/* A little helper macro for trees */
833
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
834

835
/* ----------------------------- Segments -------------------------------- */
836

837
struct malloc_segment {
838
  char        *base;             /* base address */
839
  size_t       size;             /* allocated size */
840
  struct malloc_segment *next;   /* ptr to next segment */
841
};
842

843
typedef struct malloc_segment  msegment;
844
typedef struct malloc_segment *msegmentptr;
845

846
/* ---------------------------- malloc_state ----------------------------- */
847

848
/* Bin types, widths and sizes */
849
#define NSMALLBINS                (32U)
850
#define NTREEBINS                (32U)
851
#define SMALLBIN_SHIFT                (3U)
852
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
853
#define TREEBIN_SHIFT                (8U)
854
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
855
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
856
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
857

858
struct malloc_state {
859
  binmap_t   smallmap;
860
  binmap_t   treemap;
861
  size_t     dvsize;
862
  size_t     topsize;
863
  mchunkptr  dv;
864
  mchunkptr  top;
865
  size_t     trim_check;
866
  size_t     release_checks;
867
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
868
  tbinptr    treebins[NTREEBINS];
869
  msegment   seg;
870
};
871

872
typedef struct malloc_state *mstate;
873

874
#define is_initialized(M)        ((M)->top != 0)
875

876
/* -------------------------- system alloc setup ------------------------- */
877

878
/* page-align a size */
879
#define page_align(S)\
880
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
881

882
/* granularity-align a size */
883
#define granularity_align(S)\
884
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
885
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
886

887
#if LJ_TARGET_WINDOWS
888
#define mmap_align(S)        granularity_align(S)
889
#else
890
#define mmap_align(S)        page_align(S)
891
#endif
892

893
/*  True if segment S holds address A */
894
#define segment_holds(S, A)\
895
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
896

897
/* Return segment holding given address */
898
static msegmentptr segment_holding(mstate m, char *addr)
133✔
899
{
900
  msegmentptr sp = &m->seg;
133✔
901
  for (;;) {
133✔
902
    if (addr >= sp->base && addr < sp->base + sp->size)
133✔
903
      return sp;
904
    if ((sp = sp->next) == 0)
×
905
      return 0;
906
  }
907
}
908

909
/* Return true if segment contains a segment link */
910
static int has_segment_link(mstate m, msegmentptr ss)
19✔
911
{
912
  msegmentptr sp = &m->seg;
19✔
913
  for (;;) {
96✔
914
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
96✔
915
      return 1;
916
    if ((sp = sp->next) == 0)
96✔
917
      return 0;
918
  }
919
}
920

921
/*
922
  TOP_FOOT_SIZE is padding at the end of a segment, including space
923
  that may be needed to place segment records and fenceposts when new
924
  noncontiguous segments are added.
925
*/
926
#define TOP_FOOT_SIZE\
927
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
928

929
/* ---------------------------- Indexing Bins ---------------------------- */
930

931
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
932
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
933
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
934
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
935

936
/* addressing by index. See above about smallbin repositioning */
937
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
938
#define treebin_at(M,i)                (&((M)->treebins[i]))
939

940
/* assign tree index for size S to variable I */
941
#define compute_tree_index(S, I)\
942
{\
943
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
944
  if (X == 0) {\
945
    I = 0;\
946
  } else if (X > 0xFFFF) {\
947
    I = NTREEBINS-1;\
948
  } else {\
949
    unsigned int K = lj_fls(X);\
950
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
951
  }\
952
}
953

954
/* Bit representing maximum resolved size in a treebin at i */
955
#define bit_for_tree_index(i) \
956
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
957

958
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
959
#define leftshift_for_tree_index(i) \
960
   ((i == NTREEBINS-1)? 0 : \
961
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
962

963
/* The size of the smallest chunk held in bin with index i */
964
#define minsize_for_tree_index(i) \
965
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
966
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
967

968
/* ------------------------ Operations on bin maps ----------------------- */
969

970
/* bit corresponding to given index */
971
#define idx2bit(i)                ((binmap_t)(1) << (i))
972

973
/* Mark/Clear bits with given index */
974
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
975
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
976
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
977

978
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
979
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
980
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
981

982
/* mask with all bits to left of least bit of x on */
983
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
984

985
/* Set cinuse bit and pinuse bit of next chunk */
986
#define set_inuse(M,p,s)\
987
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
988
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
989

990
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
991
#define set_inuse_and_pinuse(M,p,s)\
992
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
993
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
994

995
/* Set size, cinuse and pinuse bit of this chunk */
996
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
997
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
998

999
/* ----------------------- Operations on smallbins ----------------------- */
1000

1001
/* Link a free chunk into a smallbin  */
1002
#define insert_small_chunk(M, P, S) {\
1003
  bindex_t I = small_index(S);\
1004
  mchunkptr B = smallbin_at(M, I);\
1005
  mchunkptr F = B;\
1006
  if (!smallmap_is_marked(M, I))\
1007
    mark_smallmap(M, I);\
1008
  else\
1009
    F = B->fd;\
1010
  B->fd = P;\
1011
  F->bk = P;\
1012
  P->fd = F;\
1013
  P->bk = B;\
1014
}
1015

1016
/* Unlink a chunk from a smallbin  */
1017
#define unlink_small_chunk(M, P, S) {\
1018
  mchunkptr F = P->fd;\
1019
  mchunkptr B = P->bk;\
1020
  bindex_t I = small_index(S);\
1021
  if (F == B) {\
1022
    clear_smallmap(M, I);\
1023
  } else {\
1024
    F->bk = B;\
1025
    B->fd = F;\
1026
  }\
1027
}
1028

1029
/* Unlink the first chunk from a smallbin */
1030
#define unlink_first_small_chunk(M, B, P, I) {\
1031
  mchunkptr F = P->fd;\
1032
  if (B == F) {\
1033
    clear_smallmap(M, I);\
1034
  } else {\
1035
    B->fd = F;\
1036
    F->bk = B;\
1037
  }\
1038
}
1039

1040
/* Replace dv node, binning the old one */
1041
/* Used only when dvsize known to be small */
1042
#define replace_dv(M, P, S) {\
1043
  size_t DVS = M->dvsize;\
1044
  if (DVS != 0) {\
1045
    mchunkptr DV = M->dv;\
1046
    insert_small_chunk(M, DV, DVS);\
1047
  }\
1048
  M->dvsize = S;\
1049
  M->dv = P;\
1050
}
1051

1052
/* ------------------------- Operations on trees ------------------------- */
1053

1054
/* Insert chunk into tree */
1055
#define insert_large_chunk(M, X, S) {\
1056
  tbinptr *H;\
1057
  bindex_t I;\
1058
  compute_tree_index(S, I);\
1059
  H = treebin_at(M, I);\
1060
  X->index = I;\
1061
  X->child[0] = X->child[1] = 0;\
1062
  if (!treemap_is_marked(M, I)) {\
1063
    mark_treemap(M, I);\
1064
    *H = X;\
1065
    X->parent = (tchunkptr)H;\
1066
    X->fd = X->bk = X;\
1067
  } else {\
1068
    tchunkptr T = *H;\
1069
    size_t K = S << leftshift_for_tree_index(I);\
1070
    for (;;) {\
1071
      if (chunksize(T) != S) {\
1072
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
1073
        K <<= 1;\
1074
        if (*C != 0) {\
1075
          T = *C;\
1076
        } else {\
1077
          *C = X;\
1078
          X->parent = T;\
1079
          X->fd = X->bk = X;\
1080
          break;\
1081
        }\
1082
      } else {\
1083
        tchunkptr F = T->fd;\
1084
        T->fd = F->bk = X;\
1085
        X->fd = F;\
1086
        X->bk = T;\
1087
        X->parent = 0;\
1088
        break;\
1089
      }\
1090
    }\
1091
  }\
1092
}
1093

1094
#define unlink_large_chunk(M, X) {\
1095
  tchunkptr XP = X->parent;\
1096
  tchunkptr R;\
1097
  if (X->bk != X) {\
1098
    tchunkptr F = X->fd;\
1099
    R = X->bk;\
1100
    F->bk = R;\
1101
    R->fd = F;\
1102
  } else {\
1103
    tchunkptr *RP;\
1104
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
1105
        ((R = *(RP = &(X->child[0]))) != 0)) {\
1106
      tchunkptr *CP;\
1107
      while ((*(CP = &(R->child[1])) != 0) ||\
1108
             (*(CP = &(R->child[0])) != 0)) {\
1109
        R = *(RP = CP);\
1110
      }\
1111
      *RP = 0;\
1112
    }\
1113
  }\
1114
  if (XP != 0) {\
1115
    tbinptr *H = treebin_at(M, X->index);\
1116
    if (X == *H) {\
1117
      if ((*H = R) == 0) \
1118
        clear_treemap(M, X->index);\
1119
    } else {\
1120
      if (XP->child[0] == X) \
1121
        XP->child[0] = R;\
1122
      else \
1123
        XP->child[1] = R;\
1124
    }\
1125
    if (R != 0) {\
1126
      tchunkptr C0, C1;\
1127
      R->parent = XP;\
1128
      if ((C0 = X->child[0]) != 0) {\
1129
        R->child[0] = C0;\
1130
        C0->parent = R;\
1131
      }\
1132
      if ((C1 = X->child[1]) != 0) {\
1133
        R->child[1] = C1;\
1134
        C1->parent = R;\
1135
      }\
1136
    }\
1137
  }\
1138
}
1139

1140
/* Relays to large vs small bin operations */
1141

1142
#define insert_chunk(M, P, S)\
1143
  if (is_small(S)) { insert_small_chunk(M, P, S)\
1144
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
1145

1146
#define unlink_chunk(M, P, S)\
1147
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
1148
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
1149

1150
/* -----------------------  Direct-mmapping chunks ----------------------- */
1151

1152
static void *direct_alloc(size_t nb)
3,053✔
1153
{
1154
#if LUAJIT_USE_ASAN_HARDENING
1155
  if (asan_add_overflow(nb, TOTAL_REDZONE_SIZE, &nb))
1156
    return NULL;
1157
#endif
1158
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3,053✔
1159
#if LUAJIT_USE_ASAN_HARDENING
1160
  mmsize -= TOTAL_REDZONE_SIZE;
1161
#endif
1162
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
3,053✔
1163
    char *mm = (char *)(DIRECT_MMAP(mmsize));
3,053✔
1164
    if (mm != CMFAIL) {
3,053✔
1165
      size_t offset = align_offset(chunk2mem(mm));
3,053✔
1166
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
3,053✔
1167
      mchunkptr p = (mchunkptr)(mm + offset);
3,053✔
1168
      p->prev_foot = offset | IS_DIRECT_BIT;
3,053✔
1169
      p->head = psize|CINUSE_BIT;
3,053✔
1170
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3,053✔
1171
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3,053✔
1172
      return chunk2mem(p);
3,053✔
1173
    }
1174
  }
1175
  return NULL;
1176
}
1177

1178
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
215✔
1179
{
1180
  size_t oldsize = chunksize(oldp);
215✔
1181
  if (is_small(nb)) /* Can't shrink direct regions below small size */
215✔
1182
    return NULL;
1183
  /* Keep old chunk if big enough but not too big */
1184
  if (oldsize >= nb + SIZE_T_SIZE &&
212✔
1185
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
76✔
1186
    return oldp;
1187
  } else {
1188
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
168✔
1189
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
168✔
1190
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
168✔
1191
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
168✔
1192
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
1193
    if (cp != CMFAIL) {
168✔
1194
      mchunkptr newp = (mchunkptr)(cp + offset);
168✔
1195
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
168✔
1196
      newp->head = psize|CINUSE_BIT;
168✔
1197
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
168✔
1198
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
168✔
1199
      return newp;
168✔
1200
    }
1201
  }
1202
  return NULL;
1203
}
1204

1205
/* -------------------------- mspace management -------------------------- */
1206

1207
/* Initialize top chunk and its size */
1208
static void init_top(mstate m, mchunkptr p, size_t psize)
1,455✔
1209
{
1210
  /* Ensure alignment */
1211
  void *t = chunk2mem(p);
1,455✔
1212
#if LUAJIT_USE_ASAN_HARDENING
1213
  t = mem2alloc(t);
1214
#endif
1215
  size_t offset = align_offset(t);
×
1216

1217
  p = (mchunkptr)((char *)p + offset);
1,455✔
1218
  psize -= offset;
1,455✔
1219

1220
  m->top = p;
1,455✔
1221
  m->topsize = psize;
1,455✔
1222
  p->head = psize | PINUSE_BIT;
1,455✔
1223
  /* set size of fake trailing chunk holding overhead space only once */
1224
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
1,455✔
1225
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
1,455✔
1226
}
933✔
1227

1228
/* Initialize bins for a new mstate that is otherwise zeroed out */
1229
static void init_bins(mstate m)
408✔
1230
{
1231
  /* Establish circular links for smallbins */
1232
  bindex_t i;
408✔
1233
  for (i = 0; i < NSMALLBINS; i++) {
13,464✔
1234
    sbinptr bin = smallbin_at(m,i);
13,056✔
1235
    bin->fd = bin->bk = bin;
13,056✔
1236
  }
1237
}
1238

1239
/* Allocate chunk and prepend remainder with chunk in successor base. */
1240
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
37,062✔
1241
{
1242
  mchunkptr p = align_as_chunk(newbase);
37,062✔
1243
  mchunkptr oldfirst = align_as_chunk(oldbase);
37,062✔
1244
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
37,062✔
1245
  mchunkptr q = chunk_plus_offset(p, nb);
37,062✔
1246
  size_t qsize = psize - nb;
37,062✔
1247
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
37,062✔
1248

1249
  /* consolidate remainder with first chunk of old base */
1250
  if (oldfirst == m->top) {
37,062✔
1251
    size_t tsize = m->topsize += qsize;
×
1252
    m->top = q;
×
1253
    q->head = tsize | PINUSE_BIT;
×
1254
  } else if (oldfirst == m->dv) {
37,062✔
1255
    size_t dsize = m->dvsize += qsize;
×
1256
    m->dv = q;
×
1257
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
1258
  } else {
1259
    if (!cinuse(oldfirst)) {
37,062✔
1260
      size_t nsize = chunksize(oldfirst);
15✔
1261
      unlink_chunk(m, oldfirst, nsize);
16✔
1262
      oldfirst = chunk_plus_offset(oldfirst, nsize);
15✔
1263
      qsize += nsize;
15✔
1264
    }
1265
    set_free_with_pinuse(q, qsize, oldfirst);
37,062✔
1266
    insert_chunk(m, q, qsize);
37,062✔
1267
  }
1268

1269
  return chunk2mem(p);
37,062✔
1270
}
1271

1272
/* Add a segment to hold a new noncontiguous region */
1273
static void add_segment(mstate m, char *tbase, size_t tsize)
114✔
1274
{
1275
  /* Determine locations and sizes of segment, fenceposts, old top */
1276
  char *old_top = (char *)m->top;
114✔
1277
  msegmentptr oldsp = segment_holding(m, old_top);
114✔
1278
#if LUAJIT_USE_ASAN_HARDENING
1279
  ASAN_UNPOISON_MEMORY_REGION(oldsp, sizeof(struct malloc_segment));
1280
#endif
1281
  char *old_end = oldsp->base + oldsp->size;
114✔
1282
  size_t ssize = pad_request(sizeof(struct malloc_segment));
114✔
1283
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
114✔
1284
  size_t offset = align_offset(chunk2mem(rawsp));
114✔
1285
  char *asp = rawsp + offset;
114✔
1286
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
114✔
1287
  mchunkptr sp = (mchunkptr)csp;
114✔
1288
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
114✔
1289
#if LUAJIT_USE_ASAN_HARDENING
1290
  ss = (msegmentptr)(mem2alloc(ss));
1291
#endif
1292
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
114✔
1293
  mchunkptr p = tnext;
114✔
1294

1295
  /* reset top to new space */
1296
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
114✔
1297

1298
  /* Set up segment record */
1299
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
114✔
1300
  *ss = m->seg; /* Push current record */
114✔
1301
  m->seg.base = tbase;
114✔
1302
  m->seg.size = tsize;
114✔
1303
  m->seg.next = ss;
114✔
1304

1305
  /* Insert trailing fenceposts */
1306
  for (;;) {
379✔
1307
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
379✔
1308
    p->head = FENCEPOST_HEAD;
379✔
1309
    if ((char *)(&(nextp->head)) < old_end)
379✔
1310
      p = nextp;
1311
    else
1312
      break;
1313
  }
1314

1315
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1316
  if (csp != old_top) {
114✔
1317
    mchunkptr q = (mchunkptr)old_top;
99✔
1318
    size_t psize = (size_t)(csp - old_top);
99✔
1319
    mchunkptr tn = chunk_plus_offset(q, psize);
99✔
1320
    set_free_with_pinuse(q, psize, tn);
99✔
1321
    insert_chunk(m, q, psize);
108✔
1322
  }
1323
}
114✔
1324

1325
/* -------------------------- System allocation -------------------------- */
1326

1327
static void *alloc_sys(mstate m, size_t nb)
41,143✔
1328
{
1329
  char *tbase = CMFAIL;
41,143✔
1330
  size_t tsize = 0;
41,143✔
1331

1332
  /* Directly map large chunks */
1333
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
41,143✔
1334
    void *mem = direct_alloc(nb);
3,053✔
1335
    if (mem != 0)
3,053✔
1336
      return mem;
1337
  }
1338

1339
  {
1340
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
38,090✔
1341
#if LUAJIT_USE_ASAN_HARDENING
1342
    if (asan_add_overflow(req, TOTAL_REDZONE_SIZE, &req))
1343
      return NULL;
1344
#endif
1345
    size_t rsize = granularity_align(req);
38,090✔
1346
#if LUAJIT_USE_ASAN_HARDENING
1347
    rsize -= TOTAL_REDZONE_SIZE;
1348
#endif
1349
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
38,090✔
1350
      char *mp = (char *)(CALL_MMAP(rsize));
38,090✔
1351
      if (mp != CMFAIL) {
38,090✔
1352
        tbase = mp;
38,090✔
1353
        tsize = rsize;
38,090✔
1354
      }
1355
    }
1356
  }
1357

1358
  if (tbase != CMFAIL) {
38,090✔
1359
    msegmentptr sp = &m->seg;
38,090✔
1360
    /* Try to merge with an existing segment */
1361
    while (sp != 0 && tbase != sp->base + sp->size)
353,225✔
1362
      sp = sp->next;
315,135✔
1363
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
38,090✔
1364
      sp->size += tsize;
914✔
1365
      init_top(m, m->top, m->topsize + tsize);
914✔
1366
    } else {
1367
      sp = &m->seg;
1368
      while (sp != 0 && sp->base != tbase + tsize)
66,376✔
1369
        sp = sp->next;
29,200✔
1370
      if (sp != 0) {
37,176✔
1371
        char *oldbase = sp->base;
37,062✔
1372
        sp->base = tbase;
37,062✔
1373
        sp->size += tsize;
37,062✔
1374
        return prepend_alloc(m, tbase, oldbase, nb);
37,062✔
1375
      } else {
1376
        add_segment(m, tbase, tsize);
114✔
1377
      }
1378
    }
1379

1380
    if (nb < m->topsize) { /* Allocate from new or extended top space */
1,028✔
1381
      size_t rsize = m->topsize -= nb;
1,028✔
1382
      mchunkptr p = m->top;
1,028✔
1383
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
1,028✔
1384
      r->head = rsize | PINUSE_BIT;
1,028✔
1385
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1,028✔
1386
      return chunk2mem(p);
1,028✔
1387
    }
1388
  }
1389

1390
  return NULL;
1391
}
1392

1393
/* -----------------------  system deallocation -------------------------- */
1394

1395
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1396
static size_t release_unused_segments(mstate m)
660,078✔
1397
{
1398
  size_t released = 0;
660,078✔
1399
  size_t nsegs = 0;
660,078✔
1400
  msegmentptr pred = &m->seg;
660,078✔
1401
  msegmentptr sp = pred->next;
660,078✔
1402
  while (sp != 0) {
7,487,599✔
1403
    char *base = sp->base;
6,827,521✔
1404
    size_t size = sp->size;
6,827,521✔
1405
    msegmentptr next = sp->next;
6,827,521✔
1406
    nsegs++;
6,827,521✔
1407
    {
1408
      mchunkptr p = align_as_chunk(base);
6,827,521✔
1409
      size_t psize = chunksize(p);
6,827,521✔
1410
      /* Can unmap if first chunk holds entire segment and not pinned */
1411
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
6,827,521✔
1412
        tchunkptr tp = (tchunkptr)p;
73✔
1413
        if (p == m->dv) {
73✔
1414
          m->dv = 0;
×
1415
          m->dvsize = 0;
×
1416
        } else {
1417
          unlink_large_chunk(m, tp);
73✔
1418
        }
1419
        if (CALL_MUNMAP(base, size) == 0) {
73✔
1420
          released += size;
73✔
1421
          /* unlink obsoleted record */
1422
          sp = pred;
73✔
1423
          sp->next = next;
73✔
1424
        } else { /* back out if cannot unmap */
1425
          insert_large_chunk(m, tp, psize);
×
1426
        }
1427
      }
1428
    }
1429
    pred = sp;
1430
    sp = next;
1431
  }
1432
  /* Reset check counter */
1433
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
660,078✔
1434
                      nsegs : MAX_RELEASE_CHECK_RATE;
660,078✔
1435
  return released;
660,078✔
1436
}
1437

1438
static int alloc_trim(mstate m, size_t pad)
19✔
1439
{
1440
  size_t released = 0;
19✔
1441
  if (pad < MAX_REQUEST && is_initialized(m)) {
19✔
1442
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
19✔
1443

1444
    if (m->topsize > pad) {
19✔
1445
      /* Shrink top space in granularity-size units, keeping at least one */
1446
      size_t unit = DEFAULT_GRANULARITY;
19✔
1447
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
19✔
1448
                      SIZE_T_ONE) * unit;
1449
      msegmentptr sp = segment_holding(m, (char *)m->top);
19✔
1450

1451
      if (sp->size >= extra &&
19✔
1452
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
38✔
1453
        size_t newsize = sp->size - extra;
19✔
1454
        /* Prefer mremap, fall back to munmap */
1455
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
19✔
NEW
1456
            (CALL_MUNMAP_TAIL(sp->base + newsize, extra) == 0)) {
×
1457
          released = extra;
19✔
1458
        }
1459
      }
1460

1461
      if (released != 0) {
19✔
1462
        sp->size -= released;
19✔
1463
        init_top(m, m->top, m->topsize - released);
19✔
1464
      }
1465
    }
1466

1467
    /* Unmap any unused mmapped segments */
1468
    released += release_unused_segments(m);
19✔
1469

1470
    /* On failure, disable autotrim to avoid repeated failed future calls */
1471
    if (released == 0 && m->topsize > m->trim_check)
19✔
1472
      m->trim_check = MAX_SIZE_T;
×
1473
  }
1474

1475
  return (released != 0)? 1 : 0;
19✔
1476
}
1477

1478
/* ---------------------------- malloc support --------------------------- */
1479

1480
/* allocate a large request from the best fitting chunk in a treebin */
1481
static void *tmalloc_large(mstate m, size_t nb)
293,415✔
1482
{
1483
  tchunkptr v = 0;
293,415✔
1484
  size_t rsize = ~nb+1; /* Unsigned negation */
293,415✔
1485
  tchunkptr t;
293,415✔
1486
  bindex_t idx;
293,415✔
1487
  compute_tree_index(nb, idx);
293,415✔
1488

1489
  if ((t = *treebin_at(m, idx)) != 0) {
293,415✔
1490
    /* Traverse tree for this bin looking for node with size == nb */
1491
    size_t sizebits = nb << leftshift_for_tree_index(idx);
114,125✔
1492
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
114,125✔
1493
    for (;;) {
239,735✔
1494
      tchunkptr rt;
176,930✔
1495
      size_t trem = chunksize(t) - nb;
176,930✔
1496
      if (trem < rsize) {
176,930✔
1497
        v = t;
103,422✔
1498
        if ((rsize = trem) == 0)
103,422✔
1499
          break;
1500
      }
1501
      rt = t->child[1];
154,700✔
1502
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
154,700✔
1503
      if (rt != 0 && rt != t)
154,700✔
1504
        rst = rt;
48,917✔
1505
      if (t == 0) {
154,700✔
1506
        t = rst; /* set t to least subtree holding sizes > nb */
1507
        break;
1508
      }
1509
      sizebits <<= 1;
62,805✔
1510
    }
1511
  }
1512

1513
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
293,415✔
1514
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
228,322✔
1515
    if (leftbits != 0)
228,322✔
1516
      t = *treebin_at(m, lj_ffs(leftbits));
215,408✔
1517
  }
1518

1519
  while (t != 0) { /* find smallest of tree or subtree */
662,688✔
1520
    size_t trem = chunksize(t) - nb;
369,273✔
1521
    if (trem < rsize) {
369,273✔
1522
      rsize = trem;
284,130✔
1523
      v = t;
284,130✔
1524
    }
1525
    t = leftmost_child(t);
369,273✔
1526
  }
1527

1528
  /*  If dv is a better fit, return NULL so malloc will use it */
1529
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
293,415✔
1530
    mchunkptr r = chunk_plus_offset(v, nb);
239,882✔
1531
    unlink_large_chunk(m, v);
253,351✔
1532
    if (rsize < MIN_CHUNK_SIZE) {
239,882✔
1533
      set_inuse_and_pinuse(m, v, (rsize + nb));
35,280✔
1534
    } else {
1535
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
204,602✔
1536
      set_size_and_pinuse_of_free_chunk(r, rsize);
204,602✔
1537
      insert_chunk(m, r, rsize);
233,466✔
1538
    }
1539
    return chunk2mem(v);
239,882✔
1540
  }
1541
  return NULL;
1542
}
1543

1544
/* allocate a small request from the best fitting chunk in a treebin */
static void *tmalloc_small(mstate m, size_t nb)
{
  tchunkptr t, v;
  mchunkptr r;
  size_t rsize;
  bindex_t i = lj_ffs(m->treemap);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  r = chunk_plus_offset(v, nb);
  unlink_large_chunk(m, v);
  if (rsize < MIN_CHUNK_SIZE) {
    set_inuse_and_pinuse(m, v, (rsize + nb));
  } else {
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    set_size_and_pinuse_of_free_chunk(r, rsize);
    replace_dv(m, r, rsize);
  }
  return chunk2mem(v);
}

/* ----------------------------------------------------------------------- */

void *lj_alloc_create(void)
{
  size_t tsize = DEFAULT_GRANULARITY;
#if LUAJIT_USE_ASAN_HARDENING
  tsize -= TOTAL_REDZONE_SIZE;
#endif
  char *tbase;
  INIT_MMAP();
  tbase = (char *)(CALL_MMAP(tsize));
  if (tbase != CMFAIL) {
    size_t msize = pad_request(sizeof(struct malloc_state));
    mchunkptr mn;
#if LUAJIT_USE_ASAN_HARDENING
    mchunkptr msp = (mchunkptr)(tbase + align_offset(mem2alloc(chunk2mem(tbase))));
    mstate m = (mstate)(mem2alloc(chunk2mem(msp)));
#else
    mchunkptr msp = align_as_chunk(tbase);
    mstate m = (mstate)(chunk2mem(msp));
#endif
    memset(m, 0, msize);
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->release_checks = MAX_RELEASE_CHECK_RATE;
    init_bins(m);
#if LUAJIT_USE_ASAN_HARDENING
    mn = next_chunk((mchunkptr)((char *)(m) - TWO_SIZE_T_SIZES));
#else
    mn = next_chunk(mem2chunk(m));
#endif
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
    return m;
  }
  return NULL;
}

void lj_alloc_destroy(void *msp)
{
  mstate ms = (mstate)msp;
  msegmentptr sp = &ms->seg;
#if LUAJIT_USE_ASAN_HARDENING
  asan_quarantine_drain_msp(msp);
#endif
  while (sp != 0) {
    char *base = sp->base;
    size_t size = sp->size;
    sp = sp->next;
#if LUAJIT_USE_ASAN_HARDENING
    ASAN_UNPOISON_MEMORY_REGION(base, size);
#endif
    CALL_MUNMAP(base, size);
  }
}

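/*
** lj_alloc_create() carves the allocator state itself out of the first
** mmap'ed segment and returns it as the opaque msp handle that every
** other entry point takes; lj_alloc_destroy() walks the segment list
** and unmaps everything in one pass. A compiled-out usage sketch of
** the public entry points (the 64/128-byte sizes are arbitrary):
*/
#if 0
static void example_roundtrip(void)
{
  void *msp = lj_alloc_create();
  if (msp != NULL) {
    void *p = lj_alloc_f(msp, NULL, 0, 64);  /* malloc(64) */
    p = lj_alloc_f(msp, p, 64, 128);         /* grow to 128 bytes */
    lj_alloc_f(msp, p, 128, 0);              /* free */
    lj_alloc_destroy(msp);                   /* unmap all segments */
  }
}
#endif
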
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
#if LUAJIT_USE_ASAN_HARDENING
  size_t mem_size;
  size_t poison_size;
  if (asan_malloc_sizes(nsize, &mem_size, &poison_size))
    return NULL;
  nsize = poison_size;
#endif
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
      mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
        mchunkptr b, p, r;
        size_t rsize;
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
        bindex_t i = lj_ffs(leftbits);
        b = smallbin_at(ms, i);
        p = b->fd;
        unlink_first_small_chunk(ms, b, p, i);
        rsize = small_index2size(i) - nb;
        /* Fit here cannot be remainderless if 4byte sizes */
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
          set_inuse_and_pinuse(ms, p, small_index2size(i));
        } else {
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
          r = chunk_plus_offset(p, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          replace_dv(ms, r, rsize);
        }
        mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
        mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
        return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN_HARDENING
        mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
        return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN_HARDENING
      mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else { /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
    mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
    return mem;
  } else if (nb < ms->topsize) { /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
    mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
    return mem;
  }
#if LUAJIT_USE_ASAN_HARDENING
  mem = alloc_sys(ms, nb);
  return mem != NULL ? mark_memory_region(mem2alloc(mem), mem_size, poison_size) :
                       NULL;
#else
  return alloc_sys(ms, nb);
#endif
}

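/*
** The smallbin fast path above is pure bit arithmetic: ms->smallmap
** keeps one bit per non-empty small bin, so (smallmap >> idx) & 0x3
** tests bins idx and idx+1 in one step, and idx += ~smallbits & 1
** bumps to the neighbour only when bin idx itself is empty. A
** compiled-out restatement with plain integers (hypothetical helper):
*/
#if 0
static unsigned pick_small_bin(unsigned smallmap, unsigned idx)
{
  unsigned smallbits = smallmap >> idx;
  if ((smallbits & 0x3u) != 0)      /* bin idx or idx+1 is populated */
    return idx + (~smallbits & 1);  /* +1 iff the bit for idx is clear */
  return (unsigned)-1;              /* no remainderless fit here */
}
#endif
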
static LJ_NOINLINE void *lj_alloc_free_raw(void *msp, void *ptr)
{
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
        prevsize &= ~IS_DIRECT_BIT;
        psize += prevsize + DIRECT_FOOT_PAD;
        CALL_MUNMAP((char *)p - prevsize, psize);
        return NULL;
      } else {
        mchunkptr prev = chunk_minus_offset(p, prevsize);
        psize += prevsize;
        p = prev;
        /* consolidate backward */
        if (p != fm->dv) {
          unlink_chunk(fm, p, prevsize);
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
          fm->dvsize = psize;
          set_free_with_pinuse(p, psize, next);
          return NULL;
        }
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
        fm->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == fm->dv) {
          fm->dv = 0;
          fm->dvsize = 0;
        }
        if (tsize > fm->trim_check)
          alloc_trim(fm, 0);
        return NULL;
      } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
        fm->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return NULL;
      } else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(fm, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == fm->dv) {
          fm->dvsize = psize;
          return NULL;
        }
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
        release_unused_segments(fm);
    }
  }
  return NULL;
}

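/*
** Freeing relies on boundary tags: each chunk's header stores its size
** plus the PINUSE/CINUSE bits, and a free chunk replicates its size in
** the next chunk's prev_foot, so both neighbours can be located in O(1)
** and merged without scanning. A compiled-out sketch of the backward
** half of that address arithmetic (simplified layout, hypothetical
** names; the real code also rewrites the footer and bin links):
*/
#if 0
typedef struct xchunk { size_t prev_foot, head; } xchunk;
#define XSIZE(c)   ((c)->head & ~(size_t)7)  /* low bits hold flags */
#define XPINUSE(c) ((c)->head & 1)

static xchunk *xcoalesce_backward(xchunk *p)
{
  if (!XPINUSE(p)) {  /* previous chunk is free: step back and absorb */
    xchunk *prev = (xchunk *)((char *)p - p->prev_foot);
    prev->head = (XSIZE(prev) + XSIZE(p)) | XPINUSE(prev);
    p = prev;
  }
  return p;  /* forward merge with the next chunk works the same way */
}
#endif
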
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
#if LUAJIT_USE_ASAN_HARDENING
  if (ptr != 0) {
    size_t mem_size = asan_get_size(ptr, MEM_SIZE);
    size_t poison_size = asan_get_size(ptr, POISON_SIZE);

    /* Self-move: lets the ASAN memmove interceptor validate that the
    ** whole user region is still addressable before it is poisoned.
    */
    memmove(ptr, ptr, mem_size);
    ASAN_POISON_MEMORY_REGION(mem2alloc(ptr), poison_size);
    asan_quarantine_push(msp, ptr, poison_size);
  }
  return NULL;
#else
  return lj_alloc_free_raw(msp, ptr);
#endif
}

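/*
** Under LUAJIT_USE_ASAN_HARDENING a freed block is re-poisoned and
** parked in a bounded quarantine instead of going straight back to the
** free lists, so stale pointers keep trapping until the slot is
** recycled. asan_quarantine_push() is defined elsewhere in the ASAN
** support code; one plausible shape (hypothetical names: a fixed-size
** FIFO that evicts the oldest entry into lj_alloc_free_raw) is:
*/
#if 0
#define QUARANTINE_SLOTS 128

typedef struct quarantine {
  void *slot[QUARANTINE_SLOTS];
  unsigned head, count;  /* ring buffer: oldest entry lives at head */
} quarantine;

static void quarantine_push(quarantine *q, void *msp, void *ptr)
{
  if (q->count == QUARANTINE_SLOTS) {  /* full: release the oldest */
    lj_alloc_free_raw(msp, q->slot[q->head]);
    q->head = (q->head + 1) % QUARANTINE_SLOTS;
    q->count--;
  }
  q->slot[(q->head + q->count) % QUARANTINE_SLOTS] = ptr;
  q->count++;
}
#endif
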
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
#if LUAJIT_USE_ASAN_HARDENING
  if (nsize >= MAX_REQUEST)
    return NULL;

  mstate m = (mstate)msp;

  size_t mem_size = asan_get_size(ptr, MEM_SIZE);

  void *newmem = lj_alloc_malloc(m, nsize);

  if (newmem == NULL)
    return NULL;

  memcpy(newmem, ptr, nsize > mem_size ? mem_size : nsize);
  lj_alloc_free(msp, ptr);
  return newmem;
#else
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) { /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr rem = chunk_plus_offset(newp, nb);
        set_inuse(m, newp, nb);
        set_inuse(m, rem, rsize);
        lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize | PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
        lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
#endif
}

void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}
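
/*
** lj_alloc_f matches the lua_Alloc contract: nsize == 0 frees,
** ptr == NULL allocates, anything else reallocates, and osize can be
** ignored because chunk headers already record the size. A
** compiled-out sketch of wiring it through the public API (note that
** lua_newstate with a custom allocator is not available on every
** LuaJIT build, so this is illustrative only):
*/
#if 0
#include "lua.h"

static lua_State *example_newstate(void)
{
  void *msp = lj_alloc_create();
  return msp != NULL ? lua_newstate(lj_alloc_f, msp) : NULL;
}
#endif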

#endif