tarantool / luajit / 24862238503

23 Apr 2026 10:33PM UTC coverage: 93.019% (-0.03%) from 93.051%

push

github

mandesero
asan: fix internal allocator hardening

Fix several hardening allocator issues:
* guard ASAN redzone/alignment size arithmetic against overflow;
* avoid treating tail munmap addresses as full ASAN mmap allocations;
* route freed blocks through a bounded quarantine instead of leaking all
  allocations forever;
* free old realloc blocks through the same quarantine path.

Add allocator hardening tests for oversized allocation handling and
quarantine release behaviour.

5712 of 6046 branches covered (94.48%)

Branch coverage included in aggregate %.

3 of 9 new or added lines in 1 file covered. (33.33%)

5 existing lines in 1 file now uncovered.

21791 of 23521 relevant lines covered (92.64%)

3,954,411.85 hits per line

Source File

/src/lj_alloc.c: 90.43%
1
/*
2
** Bundled memory allocator.
3
**
4
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
** The original bears the following remark:
6
**
7
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
8
**   Doug Lea and released to the public domain, as explained at
9
**   http://creativecommons.org/licenses/publicdomain.
10
**
11
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
12
**
13
** No additional copyright is claimed over the customizations.
14
** Please do NOT bother the original author about this version here!
15
**
16
** If you want to use dlmalloc in another project, you should get
17
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
** For thread-safe derivatives, take a look at:
19
** - ptmalloc: http://www.malloc.de/
20
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
*/
22

23
#define lj_alloc_c
24
#define LUA_CORE
25

26
/* To get the mremap prototype. Must be defined before any system includes. */
27
#if defined(__linux__) && !defined(_GNU_SOURCE)
28
#define _GNU_SOURCE
29
#endif
30

31
#include "lj_def.h"
32
#include "lj_arch.h"
33
#include "lj_alloc.h"
34

35
#ifndef LUAJIT_USE_SYSMALLOC
36

37
#define MAX_SIZE_T                (~(size_t)0)
38
#define MALLOC_ALIGNMENT        ((size_t)8U)
39

40
#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
41
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
43
#define MAX_RELEASE_CHECK_RATE        255
44

45
/* ------------------- size_t and alignment properties -------------------- */
46

47
/* The byte and bit size of a size_t */
48
#define SIZE_T_SIZE                (sizeof(size_t))
49
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)
50

51
/* Some constants coerced to size_t */
52
/* Annoying but necessary to avoid errors on some platforms */
53
#define SIZE_T_ZERO                ((size_t)0)
54
#define SIZE_T_ONE                ((size_t)1)
55
#define SIZE_T_TWO                ((size_t)2)
56
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
57
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
58
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59

60
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)
62

63
/* the number of bytes to offset an address to align it */
64
#define align_offset(A)\
65
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
67

68
/* -------------------------- MMAP support ------------------------------- */
69

70
#define MFAIL                        ((void *)(MAX_SIZE_T))
71
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */
72

73
#define IS_DIRECT_BIT                (SIZE_T_ONE)
74

75

76
/* Determine system-specific block allocation method. */
77
#if LJ_TARGET_WINDOWS
78

79
#define WIN32_LEAN_AND_MEAN
80
#include <windows.h>
81

82
#define LJ_ALLOC_VIRTUALALLOC        1
83

84
#if LJ_64 && !LJ_GC64
85
#define LJ_ALLOC_NTAVM                1
86
#endif
87

88
#else
89

90
#include <errno.h>
91
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92
#include <sys/mman.h>
93

94
#define LJ_ALLOC_MMAP                1
95

96
#if LJ_64
97

98
#define LJ_ALLOC_MMAP_PROBE        1
99

100
#if LJ_GC64
101
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
102
#elif LJ_TARGET_X64 && LJ_HASJIT
103
/* Due to limitations in the x64 compiler backend. */
104
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
105
#else
106
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
107
#endif
108

109
#endif
110

111
#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112
#define LJ_ALLOC_MMAP32                1
113
#endif
114

115
#if LJ_TARGET_LINUX
116
#define LJ_ALLOC_MREMAP                1
117
#endif
118

119
#endif
120

121

122
#if LJ_ALLOC_VIRTUALALLOC
123

124
#if LJ_ALLOC_NTAVM
125
/* Undocumented, but hey, that's what we all love so much about Windows. */
126
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127
                       size_t *size, ULONG alloctype, ULONG prot);
128
static PNTAVM ntavm;
129

130
/* Number of top bits of the lower 32 bits of an address that must be zero.
131
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132
*/
133
#define NTAVM_ZEROBITS                1
134

135
static void init_mmap(void)
136
{
137
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138
                                 "NtAllocateVirtualMemory");
139
}
140
#define INIT_MMAP()        init_mmap()
141

142
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143
static void *CALL_MMAP(size_t size)
144
{
145
  DWORD olderr = GetLastError();
146
  void *ptr = NULL;
147
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149
  SetLastError(olderr);
150
  return st == 0 ? ptr : MFAIL;
151
}
152

153
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154
static void *DIRECT_MMAP(size_t size)
155
{
156
  DWORD olderr = GetLastError();
157
  void *ptr = NULL;
158
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160
  SetLastError(olderr);
161
  return st == 0 ? ptr : MFAIL;
162
}
163

164
#else
165

166
/* Win32 MMAP via VirtualAlloc */
167
static void *CALL_MMAP(size_t size)
168
{
169
  DWORD olderr = GetLastError();
170
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171
  SetLastError(olderr);
172
  return ptr ? ptr : MFAIL;
173
}
174

175
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176
static void *DIRECT_MMAP(size_t size)
177
{
178
  DWORD olderr = GetLastError();
179
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180
                            PAGE_READWRITE);
181
  SetLastError(olderr);
182
  return ptr ? ptr : MFAIL;
183
}
184

185
#endif
186

187
/* This function supports releasing coalesced segments */
188
static int CALL_MUNMAP(void *ptr, size_t size)
189
{
190
  DWORD olderr = GetLastError();
191
  MEMORY_BASIC_INFORMATION minfo;
192
  char *cptr = (char *)ptr;
193
  while (size) {
194
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195
      return -1;
196
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198
      return -1;
199
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200
      return -1;
201
    cptr += minfo.RegionSize;
202
    size -= minfo.RegionSize;
203
  }
204
  SetLastError(olderr);
205
  return 0;
206
}
207

208
#elif LJ_ALLOC_MMAP
209

210
#define MMAP_PROT                (PROT_READ|PROT_WRITE)
211
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212
#define MAP_ANONYMOUS                MAP_ANON
213
#endif
214
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)
215

216
#if LJ_ALLOC_MMAP_PROBE
217

218
#ifdef MAP_TRYFIXED
219
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
220
#else
221
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
222
#endif
223

224
#define LJ_ALLOC_MMAP_PROBE_MAX                30
225
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5
226

227
#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)
228

229
#if LUAJIT_USE_ASAN_HARDENING
230

231
/*
232
** ASAN (AddressSanitizer) detects memory errors during program execution.
233
** One way it does so is by placing redzones around memory allocations: a
234
** redzone is a specially reserved area of memory before and after the
235
** allocated block, filled with a known poison value. If the program
236
** accesses memory outside of the allocation, ASAN detects the access and
237
** reports an error, allowing the developer to catch and fix the bug early.
238
**
239
** - Original paper: https://www.usenix.org/system/files/conference/atc12/atc12-final39.pdf
240
**
241
** LuaJIT ASAN instrumentation (mmap and others):
242
**
243
** - Memory map around allocation:
244
** -------------------------------------------------------------------------------------
245
** .. .. | [f7]    ...    [f7] | [00]     ...     [0(0-7)] | [f7]    ...    [f7] | .. ..
246
**       |    left redzone     |           data            |    right redzone    |
247
**       |  REDZONE_SIZE bytes |          N bytes          |  REDZONE_SIZE bytes |
248
** -------------------------------------------------------------------------------------
249
**
250
** left redzone:
251
**  The first SIZE_T_SIZE bytes of the redzone hold the data size N; the next
252
**  SIZE_T_SIZE bytes hold the full allocation size, which includes the
253
**  alignment padding for N and the size of the redzones themselves.
254
*/
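
/* Worked example of the layout above (64-bit, REDZONE_SIZE = 32): a request
** for N = 40 bytes spans 32 + 40 + 32 = 104 bytes in total; the user pointer
** is the allocation base plus REDZONE_SIZE, and the left redzone's first two
** size_t slots hold 40 (data size) and 104 (full poisoned size).
*/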
255

256
#include <sanitizer/asan_interface.h>
257

258
/**
259
 *
260
 * Memory map for 64-bit (shift = 3)
261
 * The shadow address is calculated by (Mem >> shift) + 0x7fff8000
262
 *
263
 * [0x10007fff8000, 0x7fffffffffff]        HighMem
264
 * [0x02008fff7000, 0x10007fff7fff]        HighShadow
265
 * [0x00008fff7000, 0x02008fff6fff]        ShadowGap
266
 * [0x00007fff8000, 0x00008fff6fff]        LowShadow
267
 * [0x000000000000, 0x00007fff7fff]        LowMem
268
 *
269
 */
270

271
/* Recommended redzone size is from 16 to 2048 bytes (must be a power of two).
272
** https://github.com/google/sanitizers/wiki/AddressSanitizerFlags
273
*/
274
#define REDZONE_SIZE FOUR_SIZE_T_SIZES
275

276
/* Total redzone size around allocation */
277
#define TOTAL_REDZONE_SIZE (REDZONE_SIZE << 1)
278

279
/* Multiple of the allocated memory size */
280
#define SIZE_ALIGNMENT MALLOC_ALIGNMENT
281

282
#define ASAN_QUARANTINE_MAX 1024
283
#define ASAN_QUARANTINE_MAX_BYTES (DEFAULT_GRANULARITY << 3)
284

285
/**
286
 * We can only use addresses from HighMem, so we must force the system
287
 * allocator (mmap) to return addresses at or above the lower bound of HighMem.
288
 */
289
static inline uintptr_t asan_lower_address(void)
290
{
291
  size_t shadow_scale;
292
  size_t shadow_offset;
293
  __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
294
  return (uintptr_t)(shadow_offset + (1ULL << (LJ_ALLOC_MBITS - shadow_scale)));
295
}
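
/* With the default 64-bit mapping shown above (shadow_scale = 3,
** shadow_offset = 0x7fff8000) and LJ_ALLOC_MBITS = 47, this yields
** 0x7fff8000 + (1ULL << 44) = 0x10007fff8000, i.e. exactly the lower
** bound of HighMem in the table above.
*/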
296

297
/* Round S up to the nearest multiple of ALIGN (a power of two). */
298
#define ALIGN_SIZE(S, ALIGN)  ((size_t)(((S) + (ALIGN) - 1) & ~((ALIGN) - 1)))
299

300
#define alloc2mem(p)                ((void *)((char *)(p) + REDZONE_SIZE))
301
#define mem2alloc(mem)                ((void *)((char *)(mem) - REDZONE_SIZE))
302

303
typedef enum {
304
  MEM_SIZE,
305
  POISON_SIZE
306
} SizeType;
307

308
typedef struct asan_quarantine_entry {
309
  void *msp;
310
  void *ptr;
311
  size_t size;
312
} asan_quarantine_entry;
313

314
static asan_quarantine_entry asan_quarantine[ASAN_QUARANTINE_MAX];
315
static size_t asan_quarantine_head;
316
static size_t asan_quarantine_count;
317
static size_t asan_quarantine_bytes;
318

319
static LJ_NOINLINE void *lj_alloc_free_raw(void *msp, void *ptr);
320

321
static int asan_add_overflow(size_t a, size_t b, size_t *r)
322
{
323
  *r = a + b;
324
  return *r < a;
325
}
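
/* Unsigned addition wraps modulo 2^SIZE_T_BITSIZE, so the sum is smaller
** than an operand exactly when the addition overflowed. A hypothetical
** equivalent using the GCC/Clang builtin would be:
**
**   static int asan_add_overflow_builtin(size_t a, size_t b, size_t *r)
**   {
**     return __builtin_add_overflow(a, b, r);  // non-zero on wraparound
**   }
*/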
326

327
static int asan_align_size(size_t size, size_t align, size_t *aligned)
328
{
329
  size_t add = align - 1;
330
  if (asan_add_overflow(size, add, aligned))
331
    return 1;
332
  *aligned &= ~add;
333
  return 0;
334
}
335

336
static int asan_mmap_size(size_t msize, size_t *psize)
337
{
338
  size_t size;
339
  if (asan_add_overflow(msize, TOTAL_REDZONE_SIZE, &size))
340
    return 1;
341
  return asan_align_size(size, LJ_PAGESIZE, psize);
342
}
343

344
/* Add redzones around the allocation and record the memory size and poison size. */
345
void *mark_memory_region(void *ptr, size_t msize, size_t psize)
346
{
347
  if (ptr == NULL)
348
    return NULL;
349
  if (ptr == MFAIL)
350
    return MFAIL;
351

352
  ASAN_UNPOISON_MEMORY_REGION(ptr, TWO_SIZE_T_SIZES);
353
  *((size_t *)(ptr)) = msize;
354
  *((size_t *)(ptr) + 1) = psize;
355
  ASAN_POISON_MEMORY_REGION(ptr, psize);
356
  ptr = alloc2mem(ptr);
357
  ASAN_UNPOISON_MEMORY_REGION(ptr, msize);
358
  return ptr;
359
}
360

361
size_t asan_get_size(void *ptr, SizeType type)
362
{
363
  size_t offset = (type == MEM_SIZE) ? 0 : SIZE_T_SIZE;
364
  char *alloc = (char *)mem2alloc(ptr);
365
  ASAN_UNPOISON_MEMORY_REGION(alloc + offset, SIZE_T_SIZE);
366
  size_t size = *((size_t *)(alloc + offset));
367
  ASAN_POISON_MEMORY_REGION(alloc + offset, SIZE_T_SIZE);
368
  return size;
369
}
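
/* Note: the size header lives inside the poisoned left redzone, so it is
** unpoisoned only for the duration of the read and re-poisoned afterwards;
** reading it directly would itself trip an ASAN report.
*/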
370

371
#endif
372

373
/* No point in a giant ifdef mess. Just try to open /dev/urandom.
374
** It doesn't really matter if this fails, since we get some ASLR bits from
375
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
376
*/
377
#include <fcntl.h>
378
#include <unistd.h>
379

380
static uintptr_t mmap_probe_seed(void)
×
381
{
382
  uintptr_t val;
×
383
  int fd = open("/dev/urandom", O_RDONLY);
×
384
  if (fd != -1) {
×
385
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
×
386
    (void)close(fd);
×
387
    if (ok) return val;
×
388
  }
389
  return 1;  /* Punt. */
390
}
391

392
static void *mmap_probe(size_t size)
41,296✔
393
{
394
  /* Hint for next allocation. Doesn't need to be thread-safe. */
395
  static uintptr_t hint_addr = 0;
41,296✔
396
  static uintptr_t hint_prng = 0;
41,296✔
397
  int olderr = errno;
41,296✔
398
  int retry;
41,296✔
399
#if LUAJIT_USE_ASAN_HARDENING
400
  /* Save the request memory size */
401
  size_t msize = size;
402
  /* Total allocation size corresponds to the memory size and the size of redzones */
403
  if (asan_mmap_size(size, &size))
404
    return MFAIL;
405
#endif
406
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
41,296✔
407
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
41,296✔
408
    uintptr_t addr = (uintptr_t)p;
41,296✔
409
#if LUAJIT_USE_ASAN_HARDENING
410
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= asan_lower_address() &&
411
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
412
#else
413
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
41,296✔
414
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
41,296✔
415
#endif
416
      /* We got a suitable address. Bump the hint address. */
417
      hint_addr = addr + size;
41,296✔
418
      errno = olderr;
41,296✔
419
#if LUAJIT_USE_ASAN_HARDENING
420
      p = mark_memory_region(p, msize, size);
421
#endif
422
      return p;
41,296✔
423
    }
424
    if (p != MFAIL) {
×
425
      munmap(p, size);
×
426
    } else if (errno == ENOMEM) {
×
427
      return MFAIL;
428
    }
429
    if (hint_addr) {
×
430
      /* First, try linear probing. */
431
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
432
        hint_addr += 0x1000000;
×
433
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
×
434
          hint_addr = 0;
×
435
        continue;
×
436
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
437
        /* Next, try a no-hint probe to get back an ASLR address. */
438
        hint_addr = 0;
×
439
        continue;
×
440
      }
441
    }
442
    /* Finally, try pseudo-random probing. */
443
    if (LJ_UNLIKELY(hint_prng == 0)) {
×
444
      hint_prng = mmap_probe_seed();
×
445
    }
446
    /* The unsuitable address we got has some ASLR PRNG bits. */
447
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
×
448
    do {  /* The PRNG itself is very weak, but see above. */
×
449
      hint_prng = hint_prng * 1103515245 + 12345;
×
450
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
×
451
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
×
452
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
×
453
  }
454
  errno = olderr;
×
455
  return MFAIL;
×
456
}
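
/* Note: 1103515245 and 12345 are the classic ANSI C rand() LCG constants;
** the PRNG only has to scramble hint addresses, since the real entropy
** comes from the ASLR bits mixed in above.
*/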
457

458
#endif
459

460
#if LJ_ALLOC_MMAP32
461

462
#if defined(__sun__)
463
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
464
#else
465
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
466
#endif
467

468
static void *mmap_map32(size_t size)
469
{
470
#if LJ_ALLOC_MMAP_PROBE
471
  static int fallback = 0;
472
  if (fallback)
473
    return mmap_probe(size);
474
#endif
475
  {
476
    int olderr = errno;
477
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
478
    errno = olderr;
479
/* This only allows 1GB on Linux. So fall back to probing to get 2GB. */
480
#if LJ_ALLOC_MMAP_PROBE
481
    if (ptr == MFAIL) {
482
      fallback = 1;
483
      return mmap_probe(size);
484
    }
485
#endif
486
    return ptr;
487
  }
488
}
489

490
#endif
491

492
#if LJ_ALLOC_MMAP32
493
#define CALL_MMAP(size)                mmap_map32(size)
494
#elif LJ_ALLOC_MMAP_PROBE
495
#define CALL_MMAP(size)                mmap_probe(size)
496
#else
497
static void *CALL_MMAP(size_t size)
498
{
499
  int olderr = errno;
500
#if LUAJIT_USE_ASAN_HARDENING
501
  size_t msize = size;
502
  if (asan_mmap_size(size, &size))
503
    return MFAIL;
504
#endif
505
#if LUAJIT_USE_ASAN_HARDENING
506
  void *ptr = mmap((void *)asan_lower_address(), size, MMAP_PROT, MMAP_FLAGS, -1, 0);
507
#else
508
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
509
#endif
510
  errno = olderr;
511
#if LUAJIT_USE_ASAN_HARDENING
512
  ptr = mark_memory_region(ptr, msize, size);
513
#endif
514
  return ptr;
515
}
516
#endif
517

518
#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
519

520
#include <sys/resource.h>
521

522
static void init_mmap(void)
523
{
524
  struct rlimit rlim;
525
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
526
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
527
}
528
#define INIT_MMAP()        init_mmap()
529

530
#endif
531

532
static int CALL_MUNMAP(void *ptr, size_t size)
3,586✔
533
{
534
  int olderr = errno;
3,586✔
535
#if LUAJIT_USE_ASAN_HARDENING
536
  /* Touch the region so ASAN checks that the memory is not poisoned. */
537
  memmove(ptr, ptr, size);
538
  size = asan_get_size(ptr, POISON_SIZE);
539
  ptr = mem2alloc(ptr);
540
#endif
541
  int ret = munmap(ptr, size);
3,512✔
542
#if LUAJIT_USE_ASAN_HARDENING
543
  if (ret == 0) {
544
    ASAN_POISON_MEMORY_REGION(ptr, size);
545
  }
546
#endif
547
  errno = olderr;
3,586✔
548
  return ret;
3,586✔
549
}
550

NEW
551
static int CALL_MUNMAP_TAIL(void *ptr, size_t size)
×
552
{
553
#if LUAJIT_USE_ASAN_HARDENING
554
  UNUSED(ptr);
555
  UNUSED(size);
556
  return -1;
557
#else
NEW
558
  int olderr = errno;
×
NEW
559
  int ret = munmap(ptr, size);
×
NEW
560
  errno = olderr;
×
NEW
561
  return ret;
×
562
#endif
563
}
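
/* Rationale: the ASAN build stores its size header at the start of each
** mapping, so an address in the middle of a mapping (a trimmed tail) has
** no header and must not be treated as a full ASAN mmap allocation;
** returning -1 simply disables tail trimming under ASAN.
*/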
564

565
#if LJ_ALLOC_MREMAP
566
/* Need to define _GNU_SOURCE to get the mremap prototype. */
567
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
198✔
568
{
569
  int olderr = errno;
198✔
570
#if LUAJIT_USE_ASAN_HARDENING && !(LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64))
571
  void *new_ptr = CALL_MMAP(nsz);
572
  if (new_ptr != MFAIL) {
573
    size_t oms = asan_get_size(ptr, MEM_SIZE);
574
    memcpy(new_ptr, ptr, oms > nsz ? nsz : oms);
575
    CALL_MUNMAP(ptr, osz);
576
    ptr = new_ptr;
577
  }
578
#else
579

580
#if LUAJIT_USE_ASAN_HARDENING
581
  void *old_ptr = ptr;
582
  size_t nms = nsz;
583
  osz = asan_get_size(old_ptr, POISON_SIZE);
584
  if (asan_mmap_size(nsz, &nsz))
585
    return MFAIL;
586
  ptr = mem2alloc(ptr);
587
#endif
588
  ptr = mremap(ptr, osz, nsz, flags);
396✔
589
#if LUAJIT_USE_ASAN_HARDENING
590
  if (ptr != MFAIL) {
591
    ASAN_POISON_MEMORY_REGION((void *)((char *)(old_ptr) - REDZONE_SIZE), osz);
592
    ptr = mark_memory_region(ptr, nms, nsz);
593
  }
594
#endif
595
#endif
596
  errno = olderr;
198✔
597
  return ptr;
198✔
598
}
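
/* Under ASAN, when moving is permitted, mremap() is emulated with
** CALL_MMAP + memcpy + CALL_MUNMAP so the relocated block gets fresh
** redzones; otherwise the real mremap() is used and the redzones are
** re-established around the resized mapping.
*/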
599

600
#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
601
#define CALL_MREMAP_NOMOVE        0
602
#define CALL_MREMAP_MAYMOVE        1
603
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
604
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
605
#else
606
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
607
#endif
608
#endif
609

610
#endif
611

612

613
#ifndef INIT_MMAP
614
#define INIT_MMAP()                ((void)0)
615
#endif
616

617
#ifndef DIRECT_MMAP
618
#define DIRECT_MMAP(s)                CALL_MMAP(s)
619
#endif
620

621
#ifndef CALL_MREMAP
622
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
623
#endif
624

625
/* -----------------------  Chunk representations ------------------------ */
626

627
struct malloc_chunk {
628
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
629
  size_t               head;       /* Size and inuse bits. */
630
  struct malloc_chunk *fd;         /* double links -- used only if free. */
631
  struct malloc_chunk *bk;
632
};
633

634
typedef struct malloc_chunk  mchunk;
635
typedef struct malloc_chunk *mchunkptr;
636
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
637
typedef size_t bindex_t;               /* Described below */
638
typedef unsigned int binmap_t;         /* Described below */
639
typedef unsigned int flag_t;           /* The type of various bit flag sets */
640

641
/* ------------------- Chunks sizes and alignments ----------------------- */
642

643
#define MCHUNK_SIZE                (sizeof(mchunk))
644

645
#define CHUNK_OVERHEAD                (SIZE_T_SIZE)
646

647
/* Direct chunks need a second word of overhead ... */
648
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
649
/* ... and additional padding for fake next-chunk at foot */
650
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)
651

652
/* The smallest size we can malloc is an aligned minimal chunk */
653
#define MIN_CHUNK_SIZE\
654
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
655

656
#if LUAJIT_USE_ASAN_HARDENING
657
/* conversion from malloc headers to user pointers, and back */
658
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES + REDZONE_SIZE))
659
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES - REDZONE_SIZE))
660
#else
661
/* conversion from malloc headers to user pointers, and back */
662
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
663
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
664
#endif
665
/* chunk associated with aligned address A */
666
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))
667

668
/* Bounds on request (not chunk) sizes. */
669
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
670
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
671

672
#if LUAJIT_USE_ASAN_HARDENING
673
static int asan_malloc_sizes(size_t nsize, size_t *mem_size, size_t *poison_size)
674
{
675
  size_t aligned;
676
  if (nsize == 0)
677
    nsize = MIN_CHUNK_SIZE;
678
  if (nsize >= MAX_REQUEST)
679
    return 1;
680
  if (asan_align_size(nsize, SIZE_ALIGNMENT, &aligned) ||
681
      asan_add_overflow(aligned, TOTAL_REDZONE_SIZE, poison_size) ||
682
      *poison_size >= MAX_REQUEST)
683
    return 1;
684
  *mem_size = nsize;
685
  return 0;
686
}
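
/* Worked example (64-bit): nsize = 40 gives aligned = 40, *poison_size =
** 40 + TOTAL_REDZONE_SIZE = 104, and *mem_size = 40; a request whose size
** arithmetic would overflow or reach MAX_REQUEST is rejected instead of
** silently wrapping.
*/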
687
#endif
688

689
/* pad request bytes into a usable size */
690
#define pad_request(req) \
691
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
692

693
/* pad request, checking for minimum (but not maximum) */
694
#define request2size(req) \
695
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
696

697
/* ------------------ Operations on head and foot fields ----------------- */
698

699
#define PINUSE_BIT                (SIZE_T_ONE)
700
#define CINUSE_BIT                (SIZE_T_TWO)
701
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)
702

703
/* Head value for fenceposts */
704
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)
705

706
/* extraction of fields from head words */
707
#define cinuse(p)                ((p)->head & CINUSE_BIT)
708
#define pinuse(p)                ((p)->head & PINUSE_BIT)
709
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))
710

711
#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
712
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)
713

714
/* Treat space at ptr +/- offset as a chunk */
715
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
716
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))
717

718
/* Ptr to next or previous physical malloc_chunk. */
719
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
720
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
721

722
/* extract next chunk's pinuse bit */
723
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)
724

725
/* Get/set size at footer */
726
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
727
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
728

729
/* Set size, pinuse bit, and foot */
730
#define set_size_and_pinuse_of_free_chunk(p, s)\
731
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
732

733
/* Set size, pinuse bit, foot, and clear next pinuse */
734
#define set_free_with_pinuse(p, s, n)\
735
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
736

737
#define is_direct(p)\
738
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
739

740
/* Get the internal overhead associated with chunk p */
741
#define overhead_for(p)\
742
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
743

744
#if LUAJIT_USE_ASAN_HARDENING
745
static void asan_unpoison_chunk_for_free(void *ptr)
746
{
747
  mchunkptr p = mem2chunk(ptr);
748
  size_t psize = chunksize(p);
749
  ASAN_UNPOISON_MEMORY_REGION(p, psize + SIZE_T_SIZE);
750
}
751

752
static void asan_quarantine_release_one(void)
753
{
754
  asan_quarantine_entry *entry = &asan_quarantine[asan_quarantine_head];
755
  void *msp = entry->msp;
756
  void *ptr = entry->ptr;
757
  size_t size = entry->size;
758

759
  asan_quarantine_head = (asan_quarantine_head + 1) % ASAN_QUARANTINE_MAX;
760
  asan_quarantine_count--;
761
  asan_quarantine_bytes -= size;
762
  entry->msp = NULL;
763
  entry->ptr = NULL;
764
  entry->size = 0;
765

766
  asan_unpoison_chunk_for_free(ptr);
767
  lj_alloc_free_raw(msp, ptr);
768
}
769

770
static void asan_quarantine_push(void *msp, void *ptr, size_t size)
771
{
772
  if (size > ASAN_QUARANTINE_MAX_BYTES) {
773
    asan_unpoison_chunk_for_free(ptr);
774
    lj_alloc_free_raw(msp, ptr);
775
    return;
776
  }
777

778
  while (asan_quarantine_count == ASAN_QUARANTINE_MAX ||
779
         asan_quarantine_bytes + size > ASAN_QUARANTINE_MAX_BYTES)
780
    asan_quarantine_release_one();
781

782
  size_t idx = (asan_quarantine_head + asan_quarantine_count) %
783
               ASAN_QUARANTINE_MAX;
784
  asan_quarantine[idx].msp = msp;
785
  asan_quarantine[idx].ptr = ptr;
786
  asan_quarantine[idx].size = size;
787
  asan_quarantine_count++;
788
  asan_quarantine_bytes += size;
789
}
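
/* The quarantine is a FIFO ring bounded both by entry count
** (ASAN_QUARANTINE_MAX) and by total bytes (ASAN_QUARANTINE_MAX_BYTES):
** pushing evicts (actually frees) the oldest entries until the new block
** fits, so freed memory stays poisoned for a while without being leaked.
*/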
790

791
static void asan_quarantine_drain_msp(void *msp)
792
{
793
  size_t i, kept = 0;
794
  asan_quarantine_entry kept_entries[ASAN_QUARANTINE_MAX];
795

796
  for (i = 0; i < asan_quarantine_count; i++) {
797
    size_t idx = (asan_quarantine_head + i) % ASAN_QUARANTINE_MAX;
798
    asan_quarantine_entry entry = asan_quarantine[idx];
799
    if (entry.msp == msp) {
800
      asan_quarantine_bytes -= entry.size;
801
      asan_unpoison_chunk_for_free(entry.ptr);
802
      lj_alloc_free_raw(entry.msp, entry.ptr);
803
    } else {
804
      kept_entries[kept++] = entry;
805
    }
806
  }
807

808
  memset(asan_quarantine, 0, sizeof(asan_quarantine));
809
  for (i = 0; i < kept; i++)
810
    asan_quarantine[i] = kept_entries[i];
811
  asan_quarantine_head = 0;
812
  asan_quarantine_count = kept;
813
}
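
/* Called from lj_alloc_destroy(): quarantined blocks belonging to the
** dying mspace must be released before its segments are unmapped, while
** entries owned by other mspaces are compacted back into the ring.
*/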
814
#endif
815

816
/* ---------------------- Overlaid data structures ----------------------- */
817

818
struct malloc_tree_chunk {
819
  /* The first four fields must be compatible with malloc_chunk */
820
  size_t                    prev_foot;
821
  size_t                    head;
822
  struct malloc_tree_chunk *fd;
823
  struct malloc_tree_chunk *bk;
824

825
  struct malloc_tree_chunk *child[2];
826
  struct malloc_tree_chunk *parent;
827
  bindex_t                  index;
828
};
829

830
typedef struct malloc_tree_chunk  tchunk;
831
typedef struct malloc_tree_chunk *tchunkptr;
832
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
833

834
/* A little helper macro for trees */
835
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
836

837
/* ----------------------------- Segments -------------------------------- */
838

839
struct malloc_segment {
840
  char        *base;             /* base address */
841
  size_t       size;             /* allocated size */
842
  struct malloc_segment *next;   /* ptr to next segment */
843
};
844

845
typedef struct malloc_segment  msegment;
846
typedef struct malloc_segment *msegmentptr;
847

848
/* ---------------------------- malloc_state ----------------------------- */
849

850
/* Bin types, widths and sizes */
851
#define NSMALLBINS                (32U)
852
#define NTREEBINS                (32U)
853
#define SMALLBIN_SHIFT                (3U)
854
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
855
#define TREEBIN_SHIFT                (8U)
856
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
857
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
858
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
859

860
struct malloc_state {
861
  binmap_t   smallmap;
862
  binmap_t   treemap;
863
  size_t     dvsize;
864
  size_t     topsize;
865
  mchunkptr  dv;
866
  mchunkptr  top;
867
  size_t     trim_check;
868
  size_t     release_checks;
869
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
870
  tbinptr    treebins[NTREEBINS];
871
  msegment   seg;
872
};
873

874
typedef struct malloc_state *mstate;
875

876
#define is_initialized(M)        ((M)->top != 0)
877

878
/* -------------------------- system alloc setup ------------------------- */
879

880
/* page-align a size */
881
#define page_align(S)\
882
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
883

884
/* granularity-align a size */
885
#define granularity_align(S)\
886
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
887
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
888

889
#if LJ_TARGET_WINDOWS
890
#define mmap_align(S)        granularity_align(S)
891
#else
892
#define mmap_align(S)        page_align(S)
893
#endif
894

895
/*  True if segment S holds address A */
896
#define segment_holds(S, A)\
897
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
898

899
/* Return segment holding given address */
900
static msegmentptr segment_holding(mstate m, char *addr)
134✔
901
{
902
  msegmentptr sp = &m->seg;
134✔
903
  for (;;) {
134✔
904
    if (addr >= sp->base && addr < sp->base + sp->size)
134✔
905
      return sp;
906
    if ((sp = sp->next) == 0)
×
907
      return 0;
908
  }
909
}
910

911
/* Return true if segment contains a segment link */
912
static int has_segment_link(mstate m, msegmentptr ss)
20✔
913
{
914
  msegmentptr sp = &m->seg;
20✔
915
  for (;;) {
88✔
916
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
88✔
917
      return 1;
918
    if ((sp = sp->next) == 0)
88✔
919
      return 0;
920
  }
921
}
922

923
/*
924
  TOP_FOOT_SIZE is padding at the end of a segment, including space
925
  that may be needed to place segment records and fenceposts when new
926
  noncontiguous segments are added.
927
*/
928
#define TOP_FOOT_SIZE\
929
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
930

931
/* ---------------------------- Indexing Bins ---------------------------- */
932

933
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
934
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
935
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
936
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
937

938
/* addressing by index. See above about smallbin repositioning */
939
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
940
#define treebin_at(M,i)                (&((M)->treebins[i]))
941

942
/* assign tree index for size S to variable I */
943
#define compute_tree_index(S, I)\
944
{\
945
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
946
  if (X == 0) {\
947
    I = 0;\
948
  } else if (X > 0xFFFF) {\
949
    I = NTREEBINS-1;\
950
  } else {\
951
    unsigned int K = lj_fls(X);\
952
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
953
  }\
954
}
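
/* Worked example: S = 768 gives X = 768 >> TREEBIN_SHIFT = 3, K = lj_fls(3)
** = 1, so I = (1 << 1) + ((768 >> 8) & 1) = 3, i.e. a 768-byte chunk lands
** in treebin 3.
*/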
955

956
/* Bit representing maximum resolved size in a treebin at i */
957
#define bit_for_tree_index(i) \
958
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
959

960
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
961
#define leftshift_for_tree_index(i) \
962
   ((i == NTREEBINS-1)? 0 : \
963
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
964

965
/* The size of the smallest chunk held in bin with index i */
966
#define minsize_for_tree_index(i) \
967
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
968
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
969

970
/* ------------------------ Operations on bin maps ----------------------- */
971

972
/* bit corresponding to given index */
973
#define idx2bit(i)                ((binmap_t)(1) << (i))
974

975
/* Mark/Clear bits with given index */
976
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
977
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
978
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
979

980
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
981
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
982
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
983

984
/* mask with all bits to left of least bit of x on */
985
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
986

987
/* Set cinuse bit and pinuse bit of next chunk */
988
#define set_inuse(M,p,s)\
989
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
990
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
991

992
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
993
#define set_inuse_and_pinuse(M,p,s)\
994
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
995
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
996

997
/* Set size, cinuse and pinuse bit of this chunk */
998
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
999
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1000

1001
/* ----------------------- Operations on smallbins ----------------------- */
1002

1003
/* Link a free chunk into a smallbin  */
1004
#define insert_small_chunk(M, P, S) {\
1005
  bindex_t I = small_index(S);\
1006
  mchunkptr B = smallbin_at(M, I);\
1007
  mchunkptr F = B;\
1008
  if (!smallmap_is_marked(M, I))\
1009
    mark_smallmap(M, I);\
1010
  else\
1011
    F = B->fd;\
1012
  B->fd = P;\
1013
  F->bk = P;\
1014
  P->fd = F;\
1015
  P->bk = B;\
1016
}
1017

1018
/* Unlink a chunk from a smallbin  */
1019
#define unlink_small_chunk(M, P, S) {\
1020
  mchunkptr F = P->fd;\
1021
  mchunkptr B = P->bk;\
1022
  bindex_t I = small_index(S);\
1023
  if (F == B) {\
1024
    clear_smallmap(M, I);\
1025
  } else {\
1026
    F->bk = B;\
1027
    B->fd = F;\
1028
  }\
1029
}
1030

1031
/* Unlink the first chunk from a smallbin */
1032
#define unlink_first_small_chunk(M, B, P, I) {\
1033
  mchunkptr F = P->fd;\
1034
  if (B == F) {\
1035
    clear_smallmap(M, I);\
1036
  } else {\
1037
    B->fd = F;\
1038
    F->bk = B;\
1039
  }\
1040
}
1041

1042
/* Replace dv node, binning the old one */
1043
/* Used only when dvsize known to be small */
1044
#define replace_dv(M, P, S) {\
1045
  size_t DVS = M->dvsize;\
1046
  if (DVS != 0) {\
1047
    mchunkptr DV = M->dv;\
1048
    insert_small_chunk(M, DV, DVS);\
1049
  }\
1050
  M->dvsize = S;\
1051
  M->dv = P;\
1052
}
1053

1054
/* ------------------------- Operations on trees ------------------------- */
1055

1056
/* Insert chunk into tree */
1057
#define insert_large_chunk(M, X, S) {\
1058
  tbinptr *H;\
1059
  bindex_t I;\
1060
  compute_tree_index(S, I);\
1061
  H = treebin_at(M, I);\
1062
  X->index = I;\
1063
  X->child[0] = X->child[1] = 0;\
1064
  if (!treemap_is_marked(M, I)) {\
1065
    mark_treemap(M, I);\
1066
    *H = X;\
1067
    X->parent = (tchunkptr)H;\
1068
    X->fd = X->bk = X;\
1069
  } else {\
1070
    tchunkptr T = *H;\
1071
    size_t K = S << leftshift_for_tree_index(I);\
1072
    for (;;) {\
1073
      if (chunksize(T) != S) {\
1074
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
1075
        K <<= 1;\
1076
        if (*C != 0) {\
1077
          T = *C;\
1078
        } else {\
1079
          *C = X;\
1080
          X->parent = T;\
1081
          X->fd = X->bk = X;\
1082
          break;\
1083
        }\
1084
      } else {\
1085
        tchunkptr F = T->fd;\
1086
        T->fd = F->bk = X;\
1087
        X->fd = F;\
1088
        X->bk = T;\
1089
        X->parent = 0;\
1090
        break;\
1091
      }\
1092
    }\
1093
  }\
1094
}
1095

1096
#define unlink_large_chunk(M, X) {\
1097
  tchunkptr XP = X->parent;\
1098
  tchunkptr R;\
1099
  if (X->bk != X) {\
1100
    tchunkptr F = X->fd;\
1101
    R = X->bk;\
1102
    F->bk = R;\
1103
    R->fd = F;\
1104
  } else {\
1105
    tchunkptr *RP;\
1106
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
1107
        ((R = *(RP = &(X->child[0]))) != 0)) {\
1108
      tchunkptr *CP;\
1109
      while ((*(CP = &(R->child[1])) != 0) ||\
1110
             (*(CP = &(R->child[0])) != 0)) {\
1111
        R = *(RP = CP);\
1112
      }\
1113
      *RP = 0;\
1114
    }\
1115
  }\
1116
  if (XP != 0) {\
1117
    tbinptr *H = treebin_at(M, X->index);\
1118
    if (X == *H) {\
1119
      if ((*H = R) == 0) \
1120
        clear_treemap(M, X->index);\
1121
    } else {\
1122
      if (XP->child[0] == X) \
1123
        XP->child[0] = R;\
1124
      else \
1125
        XP->child[1] = R;\
1126
    }\
1127
    if (R != 0) {\
1128
      tchunkptr C0, C1;\
1129
      R->parent = XP;\
1130
      if ((C0 = X->child[0]) != 0) {\
1131
        R->child[0] = C0;\
1132
        C0->parent = R;\
1133
      }\
1134
      if ((C1 = X->child[1]) != 0) {\
1135
        R->child[1] = C1;\
1136
        C1->parent = R;\
1137
      }\
1138
    }\
1139
  }\
1140
}
1141

1142
/* Relays to large vs small bin operations */
1143

1144
#define insert_chunk(M, P, S)\
1145
  if (is_small(S)) { insert_small_chunk(M, P, S)\
1146
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
1147

1148
#define unlink_chunk(M, P, S)\
1149
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
1150
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
1151

1152
/* -----------------------  Direct-mmapping chunks ----------------------- */
1153

1154
static void *direct_alloc(size_t nb)
3,074✔
1155
{
1156
#if LUAJIT_USE_ASAN_HARDENING
1157
  if (asan_add_overflow(nb, TOTAL_REDZONE_SIZE, &nb))
1158
    return NULL;
1159
#endif
1160
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3,074✔
1161
#if LUAJIT_USE_ASAN_HARDENING
1162
  mmsize -= TOTAL_REDZONE_SIZE;
1163
#endif
1164
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
3,074✔
1165
    char *mm = (char *)(DIRECT_MMAP(mmsize));
3,074✔
1166
    if (mm != CMFAIL) {
3,074✔
1167
      size_t offset = align_offset(chunk2mem(mm));
3,074✔
1168
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
3,074✔
1169
      mchunkptr p = (mchunkptr)(mm + offset);
3,074✔
1170
      p->prev_foot = offset | IS_DIRECT_BIT;
3,074✔
1171
      p->head = psize|CINUSE_BIT;
3,074✔
1172
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3,074✔
1173
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3,074✔
1174
      return chunk2mem(p);
3,074✔
1175
    }
1176
  }
1177
  return NULL;
1178
}
1179

1180
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
278✔
1181
{
1182
  size_t oldsize = chunksize(oldp);
278✔
1183
  if (is_small(nb)) /* Can't shrink direct regions below small size */
278✔
1184
    return NULL;
1185
  /* Keep old chunk if big enough but not too big */
1186
  if (oldsize >= nb + SIZE_T_SIZE &&
275✔
1187
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
133✔
1188
    return oldp;
1189
  } else {
1190
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
178✔
1191
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
178✔
1192
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
178✔
1193
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
178✔
1194
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
1195
    if (cp != CMFAIL) {
178✔
1196
      mchunkptr newp = (mchunkptr)(cp + offset);
178✔
1197
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
178✔
1198
      newp->head = psize|CINUSE_BIT;
178✔
1199
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
178✔
1200
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
178✔
1201
      return newp;
178✔
1202
    }
1203
  }
1204
  return NULL;
1205
}
1206

1207
/* -------------------------- mspace management -------------------------- */
1208

1209
/* Initialize top chunk and its size */
1210
static void init_top(mstate m, mchunkptr p, size_t psize)
1,651✔
1211
{
1212
  /* Ensure alignment */
1213
  void *t = chunk2mem(p);
1,651✔
1214
#if LUAJIT_USE_ASAN_HARDENING
1215
  t = mem2alloc(t);
1216
#endif
1217
  size_t offset = align_offset(t);
×
1218

1219
  p = (mchunkptr)((char *)p + offset);
1,651✔
1220
  psize -= offset;
1,651✔
1221

1222
  m->top = p;
1,651✔
1223
  m->topsize = psize;
1,651✔
1224
  p->head = psize | PINUSE_BIT;
1,651✔
1225
  /* set size of fake trailing chunk holding overhead space only once */
1226
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
1,651✔
1227
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
1,651✔
1228
}
1,129✔
1229

1230
/* Initialize bins for a new mstate that is otherwise zeroed out */
1231
static void init_bins(mstate m)
408✔
1232
{
1233
  /* Establish circular links for smallbins */
1234
  bindex_t i;
408✔
1235
  for (i = 0; i < NSMALLBINS; i++) {
13,464✔
1236
    sbinptr bin = smallbin_at(m,i);
13,056✔
1237
    bin->fd = bin->bk = bin;
13,056✔
1238
  }
1239
}
1240

1241
/* Allocate chunk and prepend remainder with chunk in successor base. */
1242
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
36,591✔
1243
{
1244
  mchunkptr p = align_as_chunk(newbase);
36,591✔
1245
  mchunkptr oldfirst = align_as_chunk(oldbase);
36,591✔
1246
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
36,591✔
1247
  mchunkptr q = chunk_plus_offset(p, nb);
36,591✔
1248
  size_t qsize = psize - nb;
36,591✔
1249
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
36,591✔
1250

1251
  /* consolidate remainder with first chunk of old base */
1252
  if (oldfirst == m->top) {
36,591✔
1253
    size_t tsize = m->topsize += qsize;
×
1254
    m->top = q;
×
1255
    q->head = tsize | PINUSE_BIT;
×
1256
  } else if (oldfirst == m->dv) {
36,591✔
1257
    size_t dsize = m->dvsize += qsize;
×
1258
    m->dv = q;
×
1259
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
1260
  } else {
1261
    if (!cinuse(oldfirst)) {
36,591✔
1262
      size_t nsize = chunksize(oldfirst);
18✔
1263
      unlink_chunk(m, oldfirst, nsize);
22✔
1264
      oldfirst = chunk_plus_offset(oldfirst, nsize);
18✔
1265
      qsize += nsize;
18✔
1266
    }
1267
    set_free_with_pinuse(q, qsize, oldfirst);
36,591✔
1268
    insert_chunk(m, q, qsize);
36,591✔
1269
  }
1270

1271
  return chunk2mem(p);
36,591✔
1272
}
1273

1274
/* Add a segment to hold a new noncontiguous region */
1275
static void add_segment(mstate m, char *tbase, size_t tsize)
114✔
1276
{
1277
  /* Determine locations and sizes of segment, fenceposts, old top */
1278
  char *old_top = (char *)m->top;
114✔
1279
  msegmentptr oldsp = segment_holding(m, old_top);
114✔
1280
#if LUAJIT_USE_ASAN_HARDENING
1281
  ASAN_UNPOISON_MEMORY_REGION(oldsp, sizeof(struct malloc_segment));
1282
#endif
1283
  char *old_end = oldsp->base + oldsp->size;
114✔
1284
  size_t ssize = pad_request(sizeof(struct malloc_segment));
114✔
1285
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
114✔
1286
  size_t offset = align_offset(chunk2mem(rawsp));
114✔
1287
  char *asp = rawsp + offset;
114✔
1288
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
114✔
1289
  mchunkptr sp = (mchunkptr)csp;
114✔
1290
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
114✔
1291
#if LUAJIT_USE_ASAN_HARDENING
1292
  ss = (msegmentptr)(mem2alloc(ss));
1293
#endif
1294
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
114✔
1295
  mchunkptr p = tnext;
114✔
1296

1297
  /* reset top to new space */
1298
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
114✔
1299

1300
  /* Set up segment record */
1301
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
114✔
1302
  *ss = m->seg; /* Push current record */
114✔
1303
  m->seg.base = tbase;
114✔
1304
  m->seg.size = tsize;
114✔
1305
  m->seg.next = ss;
114✔
1306

1307
  /* Insert trailing fenceposts */
1308
  for (;;) {
369✔
1309
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
369✔
1310
    p->head = FENCEPOST_HEAD;
369✔
1311
    if ((char *)(&(nextp->head)) < old_end)
369✔
1312
      p = nextp;
1313
    else
1314
      break;
1315
  }
1316

1317
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1318
  if (csp != old_top) {
114✔
1319
    mchunkptr q = (mchunkptr)old_top;
102✔
1320
    size_t psize = (size_t)(csp - old_top);
102✔
1321
    mchunkptr tn = chunk_plus_offset(q, psize);
102✔
1322
    set_free_with_pinuse(q, psize, tn);
102✔
1323
    insert_chunk(m, q, psize);
110✔
1324
  }
1325
}
114✔
1326

1327
/* -------------------------- System allocation -------------------------- */
1328

1329
static void *alloc_sys(mstate m, size_t nb)
40,888✔
1330
{
1331
  char *tbase = CMFAIL;
40,888✔
1332
  size_t tsize = 0;
40,888✔
1333

1334
  /* Directly map large chunks */
1335
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
40,888✔
1336
    void *mem = direct_alloc(nb);
3,074✔
1337
    if (mem != 0)
3,074✔
1338
      return mem;
1339
  }
1340

1341
  {
1342
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
37,814✔
1343
#if LUAJIT_USE_ASAN_HARDENING
1344
    if (asan_add_overflow(req, TOTAL_REDZONE_SIZE, &req))
1345
      return NULL;
1346
#endif
1347
    size_t rsize = granularity_align(req);
37,814✔
1348
#if LUAJIT_USE_ASAN_HARDENING
1349
    rsize -= TOTAL_REDZONE_SIZE;
1350
#endif
1351
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
37,814✔
1352
      char *mp = (char *)(CALL_MMAP(rsize));
37,814✔
1353
      if (mp != CMFAIL) {
37,814✔
1354
        tbase = mp;
37,814✔
1355
        tsize = rsize;
37,814✔
1356
      }
1357
    }
1358
  }
1359

1360
  if (tbase != CMFAIL) {
37,814✔
1361
    msegmentptr sp = &m->seg;
37,814✔
1362
    /* Try to merge with an existing segment */
1363
    while (sp != 0 && tbase != sp->base + sp->size)
340,814✔
1364
      sp = sp->next;
303,000✔
1365
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
37,814✔
1366
      sp->size += tsize;
1,109✔
1367
      init_top(m, m->top, m->topsize + tsize);
1,109✔
1368
    } else {
1369
      sp = &m->seg;
1370
      while (sp != 0 && sp->base != tbase + tsize)
61,312✔
1371
        sp = sp->next;
24,607✔
1372
      if (sp != 0) {
36,705✔
1373
        char *oldbase = sp->base;
36,591✔
1374
        sp->base = tbase;
36,591✔
1375
        sp->size += tsize;
36,591✔
1376
        return prepend_alloc(m, tbase, oldbase, nb);
36,591✔
1377
      } else {
1378
        add_segment(m, tbase, tsize);
114✔
1379
      }
1380
    }
1381

1382
    if (nb < m->topsize) { /* Allocate from new or extended top space */
1,223✔
1383
      size_t rsize = m->topsize -= nb;
1,223✔
1384
      mchunkptr p = m->top;
1,223✔
1385
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
1,223✔
1386
      r->head = rsize | PINUSE_BIT;
1,223✔
1387
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1,223✔
1388
      return chunk2mem(p);
1,223✔
1389
    }
1390
  }
1391

1392
  return NULL;
1393
}
1394

1395
/* -----------------------  system deallocation -------------------------- */
1396

1397
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1398
static size_t release_unused_segments(mstate m)
660,159✔
1399
{
1400
  size_t released = 0;
660,159✔
1401
  size_t nsegs = 0;
660,159✔
1402
  msegmentptr pred = &m->seg;
660,159✔
1403
  msegmentptr sp = pred->next;
660,159✔
1404
  while (sp != 0) {
7,080,941✔
1405
    char *base = sp->base;
6,420,782✔
1406
    size_t size = sp->size;
6,420,782✔
1407
    msegmentptr next = sp->next;
6,420,782✔
1408
    nsegs++;
6,420,782✔
1409
    {
1410
      mchunkptr p = align_as_chunk(base);
6,420,782✔
1411
      size_t psize = chunksize(p);
6,420,782✔
1412
      /* Can unmap if first chunk holds entire segment and not pinned */
1413
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
6,420,782✔
1414
        tchunkptr tp = (tchunkptr)p;
74✔
1415
        if (p == m->dv) {
74✔
1416
          m->dv = 0;
×
1417
          m->dvsize = 0;
×
1418
        } else {
1419
          unlink_large_chunk(m, tp);
77✔
1420
        }
1421
        if (CALL_MUNMAP(base, size) == 0) {
74✔
1422
          released += size;
74✔
1423
          /* unlink obsoleted record */
1424
          sp = pred;
74✔
1425
          sp->next = next;
74✔
1426
        } else { /* back out if cannot unmap */
1427
          insert_large_chunk(m, tp, psize);
×
1428
        }
1429
      }
1430
    }
1431
    pred = sp;
1432
    sp = next;
1433
  }
1434
  /* Reset check counter */
1435
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
660,159✔
1436
                      nsegs : MAX_RELEASE_CHECK_RATE;
660,159✔
1437
  return released;
660,159✔
1438
}
1439

1440
static int alloc_trim(mstate m, size_t pad)
20✔
1441
{
1442
  size_t released = 0;
20✔
1443
  if (pad < MAX_REQUEST && is_initialized(m)) {
20✔
1444
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
20✔
1445

1446
    if (m->topsize > pad) {
20✔
1447
      /* Shrink top space in granularity-size units, keeping at least one */
1448
      size_t unit = DEFAULT_GRANULARITY;
20✔
1449
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
20✔
1450
                      SIZE_T_ONE) * unit;
1451
      msegmentptr sp = segment_holding(m, (char *)m->top);
20✔
1452

1453
      if (sp->size >= extra &&
20✔
1454
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
40✔
1455
        size_t newsize = sp->size - extra;
20✔
1456
        /* Prefer mremap, fall back to munmap */
1457
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
20✔
NEW
1458
            (CALL_MUNMAP_TAIL(sp->base + newsize, extra) == 0)) {
×
1459
          released = extra;
20✔
1460
        }
1461
      }
1462

1463
      if (released != 0) {
20✔
1464
        sp->size -= released;
20✔
1465
        init_top(m, m->top, m->topsize - released);
20✔
1466
      }
1467
    }
1468

1469
    /* Unmap any unused mmapped segments */
1470
    released += release_unused_segments(m);
20✔
1471

1472
    /* On failure, disable autotrim to avoid repeated failed future calls */
1473
    if (released == 0 && m->topsize > m->trim_check)
20✔
1474
      m->trim_check = MAX_SIZE_T;
×
1475
  }
1476

1477
  return (released != 0)? 1 : 0;
20✔
1478
}
1479

1480
/* ---------------------------- malloc support --------------------------- */
1481

1482
/* allocate a large request from the best fitting chunk in a treebin */
1483
static void *tmalloc_large(mstate m, size_t nb)
294,638✔
1484
{
1485
  tchunkptr v = 0;
294,638✔
1486
  size_t rsize = ~nb+1; /* Unsigned negation */
294,638✔
1487
  tchunkptr t;
294,638✔
1488
  bindex_t idx;
294,638✔
1489
  compute_tree_index(nb, idx);
294,638✔
1490

1491
  if ((t = *treebin_at(m, idx)) != 0) {
294,638✔
1492
    /* Traverse tree for this bin looking for node with size == nb */
1493
    size_t sizebits = nb << leftshift_for_tree_index(idx);
122,443✔
1494
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
122,443✔
1495
    for (;;) {
258,761✔
1496
      tchunkptr rt;
190,602✔
1497
      size_t trem = chunksize(t) - nb;
190,602✔
1498
      if (trem < rsize) {
190,602✔
1499
        v = t;
108,810✔
1500
        if ((rsize = trem) == 0)
108,810✔
1501
          break;
1502
      }
1503
      rt = t->child[1];
167,113✔
1504
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
167,113✔
1505
      if (rt != 0 && rt != t)
167,113✔
1506
        rst = rt;
54,499✔
1507
      if (t == 0) {
167,113✔
1508
        t = rst; /* set t to least subtree holding sizes > nb */
1509
        break;
1510
      }
1511
      sizebits <<= 1;
68,159✔
1512
    }
1513
  }
1514

1515
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
294,638✔
1516
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
226,847✔
1517
    if (leftbits != 0)
226,847✔
1518
      t = *treebin_at(m, lj_ffs(leftbits));
214,015✔
1519
  }
1520

1521
  while (t != 0) { /* find smallest of tree or subtree */
675,692✔
1522
    size_t trem = chunksize(t) - nb;
381,054✔
1523
    if (trem < rsize) {
381,054✔
1524
      rsize = trem;
291,187✔
1525
      v = t;
291,187✔
1526
    }
1527
    t = leftmost_child(t);
381,054✔
1528
  }
1529

1530
  /*  If dv is a better fit, return NULL so malloc will use it */
1531
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
294,638✔
1532
    mchunkptr r = chunk_plus_offset(v, nb);
243,815✔
1533
    unlink_large_chunk(m, v);
259,178✔
1534
    if (rsize < MIN_CHUNK_SIZE) {
243,815✔
1535
      set_inuse_and_pinuse(m, v, (rsize + nb));
35,442✔
1536
    } else {
1537
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
208,373✔
1538
      set_size_and_pinuse_of_free_chunk(r, rsize);
208,373✔
1539
      insert_chunk(m, r, rsize);
237,344✔
1540
    }
1541
    return chunk2mem(v);
243,815✔
1542
  }
1543
  return NULL;
1544
}
1545

1546
/* allocate a small request from the best fitting chunk in a treebin */
1547
static void *tmalloc_small(mstate m, size_t nb)
191,965✔
1548
{
1549
  tchunkptr t, v;
191,965✔
1550
  mchunkptr r;
191,965✔
1551
  size_t rsize;
191,965✔
1552
  bindex_t i = lj_ffs(m->treemap);
191,965✔
1553

1554
  v = t = *treebin_at(m, i);
191,965✔
1555
  rsize = chunksize(t) - nb;
191,965✔
1556

1557
  while ((t = leftmost_child(t)) != 0) {
404,598✔
1558
    size_t trem = chunksize(t) - nb;
212,633✔
1559
    if (trem < rsize) {
212,633✔
1560
      rsize = trem;
114,650✔
1561
      v = t;
114,650✔
1562
    }
1563
  }
1564

1565
  r = chunk_plus_offset(v, nb);
191,965✔
1566
  unlink_large_chunk(m, v);
227,488✔
1567
  if (rsize < MIN_CHUNK_SIZE) {
191,965✔
1568
    set_inuse_and_pinuse(m, v, (rsize + nb));
36✔
1569
  } else {
1570
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
191,929✔
1571
    set_size_and_pinuse_of_free_chunk(r, rsize);
191,929✔
1572
    replace_dv(m, r, rsize);
191,929✔
1573
  }
1574
  return chunk2mem(v);
191,965✔
1575
}
1576

1577
/* ----------------------------------------------------------------------- */
1578

1579
void *lj_alloc_create(void)
408✔
1580
{
1581
  size_t tsize = DEFAULT_GRANULARITY;
408✔
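  /* Presumably the subtraction below keeps the initial mapping at
  ** DEFAULT_GRANULARITY once the ASAN mmap path adds TOTAL_REDZONE_SIZE
  ** of redzones around it. */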
1582
#if LUAJIT_USE_ASAN_HARDENING
1583
  tsize -= TOTAL_REDZONE_SIZE;
1584
#endif
1585
  char *tbase;
408✔
1586
  INIT_MMAP();
408✔
1587
  tbase = (char *)(CALL_MMAP(tsize));
408✔
1588
  if (tbase != CMFAIL) {
408✔
1589
    size_t msize = pad_request(sizeof(struct malloc_state));
408✔
1590
    mchunkptr mn;
408✔
1591
#if LUAJIT_USE_ASAN_HARDENING
1592
    mchunkptr msp = (mchunkptr)(tbase + align_offset(mem2alloc(chunk2mem(tbase))));
1593
    mstate m = (mstate)(mem2alloc(chunk2mem(msp)));
1594
#else
1595
    mchunkptr msp = align_as_chunk(tbase);
408✔
1596
    mstate m = (mstate)(chunk2mem(msp));
408✔
1597
#endif
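    /* The malloc_state is carved out of the head of its own first segment,
    ** so the allocator is fully self-contained and is released together
    ** with its segments in lj_alloc_destroy(). */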
1598
    memset(m, 0, msize);
408✔
1599
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
408✔
1600
    m->seg.base = tbase;
408✔
1601
    m->seg.size = tsize;
408✔
1602
    m->release_checks = MAX_RELEASE_CHECK_RATE;
408✔
1603
    init_bins(m);
408✔
1604
#if LUAJIT_USE_ASAN_HARDENING
1605
    mn = next_chunk((mchunkptr)((char *)(m) - TWO_SIZE_T_SIZES));
1606
#else
1607
    mn = next_chunk(mem2chunk(m));
408✔
1608
#endif
1609
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
408✔
1610
    return m;
408✔
1611
  }
1612
  return NULL;
1613
}
1614

1615
void lj_alloc_destroy(void *msp)
398✔
1616
{
1617
  mstate ms = (mstate)msp;
398✔
1618
  msegmentptr sp = &ms->seg;
398✔
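  /* Under ASAN the free-block quarantine is drained first, so nothing
  ** parked there by lj_alloc_free() outlives the segments unmapped below. */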
1619
#if LUAJIT_USE_ASAN_HARDENING
1620
  asan_quarantine_drain_msp(msp);
1621
#endif
1622
  while (sp != 0) {
836✔
1623
    char *base = sp->base;
438✔
1624
    size_t size = sp->size;
438✔
1625
    sp = sp->next;
438✔
1626
#if LUAJIT_USE_ASAN_HARDENING
1627
    ASAN_UNPOISON_MEMORY_REGION(base, size);
1628
#endif
1629
    CALL_MUNMAP(base, size);
438✔
1630
  }
1631
}
398✔
1632

1633
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
174,987,571✔
1634
{
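  /* Under ASAN, mem_size is the user-visible allocation size and
  ** poison_size the full block including redzones; asan_malloc_sizes()
  ** fails on overflow of the redzone/alignment arithmetic, making
  ** oversized requests return NULL. */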
1635
#if LUAJIT_USE_ASAN_HARDENING
1636
  size_t mem_size;
1637
  size_t poison_size;
1638
  if (asan_malloc_sizes(nsize, &mem_size, &poison_size))
1639
    return NULL;
1640
  nsize = poison_size;
1641
#endif
1642
  mstate ms = (mstate)msp;
174,987,571✔
1643
  void *mem;
174,987,571✔
1644
  size_t nb;
174,987,571✔
1645
  if (nsize <= MAX_SMALL_REQUEST) {
174,987,571✔
1646
    bindex_t idx;
174,656,564✔
1647
    binmap_t smallbits;
174,656,564✔
1648
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
174,656,564✔
1649
    idx = small_index(nb);
174,656,564✔
1650
    smallbits = ms->smallmap >> idx;
174,656,564✔
1651

1652
    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
174,656,564✔
1653
      mchunkptr b, p;
579,160✔
1654
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
579,160✔
1655
      b = smallbin_at(ms, idx);
579,160✔
1656
      p = b->fd;
579,160✔
1657
      unlink_first_small_chunk(ms, b, p, idx);
579,160✔
1658
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
579,160✔
1659
      mem = chunk2mem(p);
579,160✔
1660
#if LUAJIT_USE_ASAN_HARDENING
1661
      mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
1662
#endif
1663
      return mem;
579,160✔
1664
    } else if (nb > ms->dvsize) {
174,077,404✔
1665
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
5,227,811✔
1666
        mchunkptr b, p, r;
135,139✔
1667
        size_t rsize;
135,139✔
1668
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
135,139✔
1669
        bindex_t i = lj_ffs(leftbits);
135,139✔
1670
        b = smallbin_at(ms, i);
135,139✔
1671
        p = b->fd;
135,139✔
1672
        unlink_first_small_chunk(ms, b, p, i);
135,139✔
1673
        rsize = small_index2size(i) - nb;
135,139✔
1674
        /* Fit here cannot be remainderless if size_t is 4 bytes */
1675
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
135,139✔
1676
          set_inuse_and_pinuse(ms, p, small_index2size(i));
27,782✔
1677
        } else {
1678
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
107,357✔
1679
          r = chunk_plus_offset(p, nb);
107,357✔
1680
          set_size_and_pinuse_of_free_chunk(r, rsize);
107,357✔
1681
          replace_dv(ms, r, rsize);
107,357✔
1682
        }
1683
        mem = chunk2mem(p);
135,139✔
1684
#if LUAJIT_USE_ASAN_HARDENING
1685
        mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
1686
#endif
1687
        return mem;
135,139✔
1688
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
5,092,672✔
1689
#if LUAJIT_USE_ASAN_HARDENING
1690
        mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
1691
#endif
1692
        return mem;
1693
      }
1694
    }
1695
  } else if (nsize >= MAX_REQUEST) {
331,007✔
1696
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
1697
  } else {
1698
    nb = pad_request(nsize);
331,007✔
1699
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
331,007✔
1700
#if LUAJIT_USE_ASAN_HARDENING
1701
      mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
1702
#endif
1703
      return mem;
1704
    }
1705
  }
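  /* No exact or bin fit: fall back to the designated victim (dv), the
  ** remainder of the most recent small split, which preserves locality. */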
1706

1707
  if (nb <= ms->dvsize) {
173,837,492✔
1708
    size_t rsize = ms->dvsize - nb;
168,903,998✔
1709
    mchunkptr p = ms->dv;
168,903,998✔
1710
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
168,903,998✔
1711
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
168,726,398✔
1712
      ms->dvsize = rsize;
168,726,398✔
1713
      set_size_and_pinuse_of_free_chunk(r, rsize);
168,726,398✔
1714
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
168,726,398✔
1715
    } else { /* exhaust dv */
1716
      size_t dvs = ms->dvsize;
177,600✔
1717
      ms->dvsize = 0;
177,600✔
1718
      ms->dv = 0;
177,600✔
1719
      set_inuse_and_pinuse(ms, p, dvs);
177,600✔
1720
    }
1721
    mem = chunk2mem(p);
168,903,998✔
1722
#if LUAJIT_USE_ASAN_HARDENING
1723
    mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
1724
#endif
1725
    return mem;
168,903,998✔
1726
  } else if (nb < ms->topsize) { /* Split top */
4,933,494✔
1727
    size_t rsize = ms->topsize -= nb;
4,892,606✔
1728
    mchunkptr p = ms->top;
4,892,606✔
1729
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4,892,606✔
1730
    r->head = rsize | PINUSE_BIT;
4,892,606✔
1731
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4,892,606✔
1732
    mem = chunk2mem(p);
4,892,606✔
1733
#if LUAJIT_USE_ASAN_HARDENING
1734
    mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
1735
#endif
1736
    return mem;
4,892,606✔
1737
  }
1738
#if LUAJIT_USE_ASAN_HARDENING
1739
  mem = alloc_sys(ms, nb);
1740
  return mem != NULL ? mark_memory_region(mem2alloc(mem), mem_size, poison_size) :
1741
                       NULL;
1742
#else
1743
  return alloc_sys(ms, nb);
40,888✔
1744
#endif
1745
}
1746

1747
static LJ_NOINLINE void *lj_alloc_free_raw(void *msp, void *ptr)
175,021,503✔
1748
{
1749
  if (ptr != 0) {
175,021,503✔
1750
    mchunkptr p = mem2chunk(ptr);
174,979,924✔
1751
    mstate fm = (mstate)msp;
174,979,924✔
1752
    size_t psize = chunksize(p);
174,979,924✔
1753
    mchunkptr next = chunk_plus_offset(p, psize);
174,979,924✔
1754
    if (!pinuse(p)) {
174,979,924✔
1755
      size_t prevsize = p->prev_foot;
27,214,739✔
1756
      if ((prevsize & IS_DIRECT_BIT) != 0) {
27,214,739✔
1757
        prevsize &= ~IS_DIRECT_BIT;
3,074✔
1758
        psize += prevsize + DIRECT_FOOT_PAD;
3,074✔
1759
        CALL_MUNMAP((char *)p - prevsize, psize);
3,074✔
1760
        return NULL;
3,074✔
1761
      } else {
1762
        mchunkptr prev = chunk_minus_offset(p, prevsize);
27,211,665✔
1763
        psize += prevsize;
27,211,665✔
1764
        p = prev;
27,211,665✔
1765
        /* consolidate backward */
1766
        if (p != fm->dv) {
27,211,665✔
1767
          unlink_chunk(fm, p, prevsize);
26,904,489✔
1768
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
382,951✔
1769
          fm->dvsize = psize;
275,708✔
1770
          set_free_with_pinuse(p, psize, next);
275,708✔
1771
          return NULL;
275,708✔
1772
        }
1773
      }
1774
    }
1775
    if (!cinuse(next)) {  /* consolidate forward */
174,701,142✔
1776
      if (next == fm->top) {
146,981,544✔
1777
        size_t tsize = fm->topsize += psize;
144,245✔
1778
        fm->top = p;
144,245✔
1779
        p->head = tsize | PINUSE_BIT;
144,245✔
1780
        if (p == fm->dv) {
144,245✔
1781
          fm->dv = 0;
300✔
1782
          fm->dvsize = 0;
300✔
1783
        }
1784
        if (tsize > fm->trim_check)
144,245✔
1785
          alloc_trim(fm, 0);
20✔
1786
        return NULL;
144,245✔
1787
      } else if (next == fm->dv) {
146,837,299✔
1788
        size_t dsize = fm->dvsize += psize;
878,066✔
1789
        fm->dv = p;
878,066✔
1790
        set_size_and_pinuse_of_free_chunk(p, dsize);
878,066✔
1791
        return NULL;
878,066✔
1792
      } else {
1793
        size_t nsize = chunksize(next);
145,959,233✔
1794
        psize += nsize;
145,959,233✔
1795
        unlink_chunk(fm, next, nsize);
146,040,157✔
1796
        set_size_and_pinuse_of_free_chunk(p, psize);
145,959,233✔
1797
        if (p == fm->dv) {
145,959,233✔
1798
          fm->dvsize = psize;
106,943✔
1799
          return NULL;
106,943✔
1800
        }
1801
      }
1802
    } else {
1803
      set_free_with_pinuse(p, psize, next);
27,719,598✔
1804
    }
1805

1806
    if (is_small(psize)) {
173,571,888✔
1807
      insert_small_chunk(fm, p, psize);
5,172,369✔
1808
    } else {
1809
      tchunkptr tp = (tchunkptr)p;
168,399,519✔
1810
      insert_large_chunk(fm, tp, psize);
185,750,846✔
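      /* Rate-limit segment release: a scan for whole free segments to
      ** unmap runs only once per MAX_RELEASE_CHECK_RATE large frees. */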
1811
      if (--fm->release_checks == 0)
168,399,519✔
1812
        release_unused_segments(fm);
660,139✔
1813
    }
1814
  }
1815
  return NULL;
1816
}
1817

1818
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
175,021,503✔
1819
{
1820
#if LUAJIT_USE_ASAN_HARDENING
1821
  if (ptr != 0) {
1822
    size_t mem_size = asan_get_size(ptr, MEM_SIZE);
1823
    size_t poison_size = asan_get_size(ptr, POISON_SIZE);
1824

1825
    memmove(ptr, ptr, mem_size);
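    /* The self-memmove above is a no-op copy, but it is intercepted by
    ** ASAN, which verifies that all mem_size bytes are still addressable;
    ** freeing an already-poisoned block (e.g. a double free) is therefore
    ** reported here. */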
1826
    ASAN_POISON_MEMORY_REGION(mem2alloc(ptr), poison_size);
1827
    asan_quarantine_push(msp, ptr, poison_size);
1828
  }
1829
  return NULL;
1830
#else
1831
  return lj_alloc_free_raw(msp, ptr);
175,021,503✔
1832
#endif
1833
}
1834

1835
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
128,221✔
1836
{
1837
#if LUAJIT_USE_ASAN_HARDENING
1838
  if (nsize >= MAX_REQUEST)
1839
    return NULL;
1840

1841
  mstate m = (mstate)msp;
1842

1843
  size_t mem_size = asan_get_size(ptr, MEM_SIZE);
1844

1845
  void *newmem = lj_alloc_malloc(m, nsize);
1846

1847
  if (newmem == NULL)
1848
    return NULL;
1849

1850
  memcpy(newmem, ptr, nsize > mem_size ? mem_size : nsize);
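  /* Route the old block through lj_alloc_free() and thus the same bounded
  ** quarantine as ordinary frees, rather than releasing it immediately. */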
1851
  lj_alloc_free(msp, ptr);
1852
  return newmem;
1853
#else
1854
  if (nsize >= MAX_REQUEST) {
128,221✔
1855
    return NULL;
1856
  } else {
1857
    mstate m = (mstate)msp;
128,221✔
1858
    mchunkptr oldp = mem2chunk(ptr);
128,221✔
1859
    size_t oldsize = chunksize(oldp);
128,221✔
1860
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
128,221✔
1861
    mchunkptr newp = 0;
128,221✔
1862
    size_t nb = request2size(nsize);
128,221✔
1863

1864
    /* Try to either shrink or extend into top. Else malloc-copy-free */
1865
    if (is_direct(oldp)) {
128,221✔
1866
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
278✔
1867
    } else if (oldsize >= nb) { /* already big enough */
127,943✔
1868
      size_t rsize = oldsize - nb;
751✔
1869
      newp = oldp;
751✔
1870
      if (rsize >= MIN_CHUNK_SIZE) {
751✔
1871
        mchunkptr rem = chunk_plus_offset(newp, nb);
744✔
1872
        set_inuse(m, newp, nb);
744✔
1873
        set_inuse(m, rem, rsize);
744✔
1874
        lj_alloc_free(m, chunk2mem(rem));
744✔
1875
      }
1876
    } else if (next == m->top && oldsize + m->topsize > nb) {
127,192✔
1877
      /* Expand into top */
1878
      size_t newsize = oldsize + m->topsize;
541✔
1879
      size_t newtopsize = newsize - nb;
541✔
1880
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
541✔
1881
      set_inuse(m, oldp, nb);
541✔
1882
      newtop->head = newtopsize | PINUSE_BIT;
541✔
1883
      m->top = newtop;
541✔
1884
      m->topsize = newtopsize;
541✔
1885
      newp = oldp;
541✔
1886
    }
1887

1888
    if (newp != 0) {
1,570✔
1889
      return chunk2mem(newp);
1,567✔
1890
    } else {
1891
      void *newmem = lj_alloc_malloc(m, nsize);
126,654✔
1892
      if (newmem != 0) {
126,654✔
1893
        size_t oc = oldsize - overhead_for(oldp);
126,654✔
1894
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
126,654✔
1895
        lj_alloc_free(m, ptr);
126,654✔
1896
      }
1897
      return newmem;
126,654✔
1898
    }
1899
  }
1900
#endif
1901
}
1902

1903
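/* Lua allocator entry point (lua_Alloc contract): nsize == 0 frees ptr,
** ptr == NULL allocates nsize bytes, anything else reallocates ptr.
** osize is unused here. */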
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
349,883,243✔
1904
{
1905
  (void)osize;
349,883,243✔
1906
  if (nsize == 0) {
349,883,243✔
1907
    return lj_alloc_free(msp, ptr);
174,894,105✔
1908
  } else if (ptr == NULL) {
174,989,138✔
1909
    return lj_alloc_malloc(msp, nsize);
174,860,917✔
1910
  } else {
1911
    return lj_alloc_realloc(msp, ptr, nsize);
128,221✔
1912
  }
1913
}
1914

1915
#endif