tarantool / luajit / 15824149914

23 Jun 2025 12:21PM UTC coverage: 93.046% (+0.02%) from 93.029%
push (github)

mandesero
asan: instrumented LuaJIT memory allocator

This patch adds instrumentation of the internal LuaJIT memory allocator.
It enables the detection of memory-related errors both when using FFI and
within LuaJIT itself, improving reliability and debugging capabilities.

This patch introduces two scenarios for using ASAN with LuaJIT:
- LuaJIT using sysmalloc: `-DLUAJIT_USE_ASAN=ON`
- LuaJIT using the internal memory allocator: `-DLUAJIT_USE_ASAN_HARDENING=ON`

If you want to skip tests when LuaJIT uses the internal memory allocator,
you can check the `LJ_ASAN_HARDENING` environment variable.
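
For example, a test can bail out early when this variable is set. The snippet
below is only a minimal sketch: it assumes the test harness exports
`LJ_ASAN_HARDENING` into the environment of the test process, and the plain
TAP skip line stands in for whatever helper the suite actually provides.

  -- Hypothetical test preamble: skip the whole file when LuaJIT is built
  -- with its ASAN-instrumented internal allocator.
  if os.getenv('LJ_ASAN_HARDENING') then
    print('1..0 # SKIP instrumented internal allocator is in use')
    os.exit(0)
  end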

The test `test/tarantool-tests/lj-1034-tabov-error-frame.test.lua` has
been disabled under ASAN & LuaJIT's internal allocator because it
consistently fails with a timeout.

Part of #10231

5707 of 6044 branches covered (94.42%)

Branch coverage included in aggregate %.

1 of 2 new or added lines in 1 file covered. (50.0%)

5 existing lines in 2 files now uncovered.

21791 of 23509 relevant lines covered (92.69%)

3834293.33 hits per line

Source File: /src/lj_alloc.c (91.13% covered)
1
/*
2
** Bundled memory allocator.
3
**
4
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
** The original bears the following remark:
6
**
7
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
8
**   Doug Lea and released to the public domain, as explained at
9
**   http://creativecommons.org/licenses/publicdomain.
10
**
11
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
12
**
13
** No additional copyright is claimed over the customizations.
14
** Please do NOT bother the original author about this version here!
15
**
16
** If you want to use dlmalloc in another project, you should get
17
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
** For thread-safe derivatives, take a look at:
19
** - ptmalloc: http://www.malloc.de/
20
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
*/
22

23
#define lj_alloc_c
24
#define LUA_CORE
25

26
/* To get the mremap prototype. Must be defined before any system includes. */
27
#if defined(__linux__) && !defined(_GNU_SOURCE)
28
#define _GNU_SOURCE
29
#endif
30

31
#include "lj_def.h"
32
#include "lj_arch.h"
33
#include "lj_alloc.h"
34

35
#ifndef LUAJIT_USE_SYSMALLOC
36

37
#define MAX_SIZE_T                (~(size_t)0)
38
#define MALLOC_ALIGNMENT        ((size_t)8U)
39

40
#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
41
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
43
#define MAX_RELEASE_CHECK_RATE        255
44

45
/* ------------------- size_t and alignment properties -------------------- */
46

47
/* The byte and bit size of a size_t */
48
#define SIZE_T_SIZE                (sizeof(size_t))
49
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)
50

51
/* Some constants coerced to size_t */
52
/* Annoying but necessary to avoid errors on some platforms */
53
#define SIZE_T_ZERO                ((size_t)0)
54
#define SIZE_T_ONE                ((size_t)1)
55
#define SIZE_T_TWO                ((size_t)2)
56
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
57
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
58
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59

60
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)
62

63
/* the number of bytes to offset an address to align it */
64
#define align_offset(A)\
65
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
67

68
/* -------------------------- MMAP support ------------------------------- */
69

70
#define MFAIL                        ((void *)(MAX_SIZE_T))
71
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */
72

73
#define IS_DIRECT_BIT                (SIZE_T_ONE)
74

75

76
/* Determine system-specific block allocation method. */
77
#if LJ_TARGET_WINDOWS
78

79
#define WIN32_LEAN_AND_MEAN
80
#include <windows.h>
81

82
#define LJ_ALLOC_VIRTUALALLOC        1
83

84
#if LJ_64 && !LJ_GC64
85
#define LJ_ALLOC_NTAVM                1
86
#endif
87

88
#else
89

90
#include <errno.h>
91
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92
#include <sys/mman.h>
93

94
#define LJ_ALLOC_MMAP                1
95

96
#if LJ_64
97

98
#define LJ_ALLOC_MMAP_PROBE        1
99

100
#if LJ_GC64
101
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
102
#elif LJ_TARGET_X64 && LJ_HASJIT
103
/* Due to limitations in the x64 compiler backend. */
104
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
105
#else
106
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
107
#endif
108

109
#endif
110

111
#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112
#define LJ_ALLOC_MMAP32                1
113
#endif
114

115
#if LJ_TARGET_LINUX
116
#define LJ_ALLOC_MREMAP                1
117
#endif
118

119
#endif
120

121

122
#if LJ_ALLOC_VIRTUALALLOC
123

124
#if LJ_ALLOC_NTAVM
125
/* Undocumented, but hey, that's what we all love so much about Windows. */
126
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127
                       size_t *size, ULONG alloctype, ULONG prot);
128
static PNTAVM ntavm;
129

130
/* Number of top bits of the lower 32 bits of an address that must be zero.
131
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132
*/
133
#define NTAVM_ZEROBITS                1
134

135
static void init_mmap(void)
136
{
137
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138
                                 "NtAllocateVirtualMemory");
139
}
140
#define INIT_MMAP()        init_mmap()
141

142
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143
static void *CALL_MMAP(size_t size)
144
{
145
  DWORD olderr = GetLastError();
146
  void *ptr = NULL;
147
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149
  SetLastError(olderr);
150
  return st == 0 ? ptr : MFAIL;
151
}
152

153
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154
static void *DIRECT_MMAP(size_t size)
155
{
156
  DWORD olderr = GetLastError();
157
  void *ptr = NULL;
158
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160
  SetLastError(olderr);
161
  return st == 0 ? ptr : MFAIL;
162
}
163

164
#else
165

166
/* Win32 MMAP via VirtualAlloc */
167
static void *CALL_MMAP(size_t size)
168
{
169
  DWORD olderr = GetLastError();
170
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171
  SetLastError(olderr);
172
  return ptr ? ptr : MFAIL;
173
}
174

175
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176
static void *DIRECT_MMAP(size_t size)
177
{
178
  DWORD olderr = GetLastError();
179
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180
                            PAGE_READWRITE);
181
  SetLastError(olderr);
182
  return ptr ? ptr : MFAIL;
183
}
184

185
#endif
186

187
/* This function supports releasing coalesced segments */
188
static int CALL_MUNMAP(void *ptr, size_t size)
189
{
190
  DWORD olderr = GetLastError();
191
  MEMORY_BASIC_INFORMATION minfo;
192
  char *cptr = (char *)ptr;
193
  while (size) {
194
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195
      return -1;
196
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198
      return -1;
199
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200
      return -1;
201
    cptr += minfo.RegionSize;
202
    size -= minfo.RegionSize;
203
  }
204
  SetLastError(olderr);
205
  return 0;
206
}
207

208
#elif LJ_ALLOC_MMAP
209

210
#define MMAP_PROT                (PROT_READ|PROT_WRITE)
211
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212
#define MAP_ANONYMOUS                MAP_ANON
213
#endif
214
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)
215

216
#if LJ_ALLOC_MMAP_PROBE
217

218
#ifdef MAP_TRYFIXED
219
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
220
#else
221
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
222
#endif
223

224
#define LJ_ALLOC_MMAP_PROBE_MAX                30
225
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5
226

227
#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)
228

229
#if LUAJIT_USE_ASAN_HARDENING
230

231
/*
232
** The work of asan (AddressSanitizer) is to detect memory errors during program execution.
233
** One way to achieve this is by adding redzones around memory allocations. The redzone is a
234
** specially allocated area of memory before and after the allocated block, which is filled
235
** with a unique value. If the program tries to access memory outside of the allocation,
236
** asan detects this attempt and generates an error message, allowing the developer to
237
** detect and fix the issue early.
238
**
239
** - Original paper: https://www.usenix.org/system/files/conference/atc12/atc12-final39.pdf
240
**
241
** LuaJIT ASAN instrumentation (mmap and others):
242
**
243
** - Memory map around allocation:
244
** -------------------------------------------------------------------------------------
245
** .. .. | [f7]    ...    [f7] | [00]     ...     [0(0-7)] | [f7]    ...    [f7] | .. ..
246
**       |    left redzone     |           data            |    right redzone    |
247
**       |  REDZONE_SIZE bytes |          N bytes          |  REDZONE_SIZE bytes |
248
** -------------------------------------------------------------------------------------
249
**
250
** left redzone:
251
**  The first SIZE_T_SIZE bytes of the redzone contain the data size N, the next SIZE_T_SIZE bytes
252
**  of the redzone contain the full size of the allocation, including the alignment of the size N
253
**  and the size of the redzones themselves.
254
*/
255

256
#include <sanitizer/asan_interface.h>
257

258
/**
259
 *
260
 * Memory map for 64-bit (shift = 3)
261
 * The shadow address is calculated by (Mem >> shift) + 0x7fff8000
262
 *
263
 * [0x10007fff8000, 0x7fffffffffff]        HighMem
264
 * [0x02008fff7000, 0x10007fff7fff]        HighShadow
265
 * [0x00008fff7000, 0x02008fff6fff]        ShadowGap
266
 * [0x00007fff8000, 0x00008fff6fff]        LowShadow
267
 * [0x000000000000, 0x00007fff7fff]        LowMem
268
 *
269
 */
270

271
/* Recommended redzone size from 16 to 2048 bytes (must be a power of two)
272
** https://github.com/google/sanitizers/wiki/AddressSanitizerFlags
273
*/
274
#define REDZONE_SIZE FOUR_SIZE_T_SIZES
275

276
/* Total redzone size around allocation */
277
#define TOTAL_REDZONE_SIZE (REDZONE_SIZE << 1)
278

279
/* Multiple of the allocated memory size */
280
#define SIZE_ALIGNMENT MALLOC_ALIGNMENT
281

282
/**
283
 * We can only use addresses from HighMem, so we must force the system allocator (mmap)
284
 * to return addresses starting from the lower bound of HighMem.
285
 */
286
static inline uintptr_t asan_lower_address()
287
{
288
  size_t shadow_scale;
289
  size_t shadow_offset;
290
  __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
291
  return (uintptr_t)(shadow_offset + (1ULL << (LJ_ALLOC_MBITS - shadow_scale)));
292
}
293

294
/* Round S up to the nearest multiple of ALIGN */
295
#define ALIGN_SIZE(S, ALIGN)  ((size_t)(((S) + (ALIGN) - 1) & ~((ALIGN) - 1)))
296

297
#define alloc2mem(p)                ((void *)((char *)(p) + REDZONE_SIZE))
298
#define mem2alloc(mem)                ((void *)((char *)(mem) - REDZONE_SIZE))
299

300
/* Add redzones around the allocation and store the memory size and poison size. */
301
void *mark_memory_region(void *ptr, size_t msize, size_t psize)
302
{
303
  if (ptr == NULL)
304
    return NULL;
305

306
  ASAN_UNPOISON_MEMORY_REGION(ptr, TWO_SIZE_T_SIZES);
307
  *((size_t *)(ptr)) = msize;
308
  *((size_t *)(ptr) + 1) = psize;
309
  ASAN_POISON_MEMORY_REGION(ptr, psize);
310
  ptr = alloc2mem(ptr);
311
  ASAN_UNPOISON_MEMORY_REGION(ptr, msize);
312
  return ptr;
313
}
314

315
typedef enum {
316
  MEM_SIZE,
317
  POISON_SIZE
318
} SizeType;
319

320
size_t asan_get_size(void *ptr, SizeType type)
321
{
322
  size_t offset = (type == MEM_SIZE) ? 0 : SIZE_T_SIZE;
323
  ASAN_UNPOISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
324
  size_t size = *((size_t *)(ptr - REDZONE_SIZE + offset));
325
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
326
  return size;
327
}
328

329
#endif
330

331
/* No point in a giant ifdef mess. Just try to open /dev/urandom.
332
** It doesn't really matter if this fails, since we get some ASLR bits from
333
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
334
*/
335
#include <fcntl.h>
336
#include <unistd.h>
337

338
static uintptr_t mmap_probe_seed(void)
×
339
{
340
  uintptr_t val;
×
341
  int fd = open("/dev/urandom", O_RDONLY);
×
342
  if (fd != -1) {
×
343
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
×
344
    (void)close(fd);
×
345
    if (ok) return val;
×
346
  }
347
  return 1;  /* Punt. */
348
}
349

350
static void *mmap_probe(size_t size)
41,668✔
351
{
352
  /* Hint for next allocation. Doesn't need to be thread-safe. */
353
  static uintptr_t hint_addr = 0;
41,668✔
354
  static uintptr_t hint_prng = 0;
41,668✔
355
  int olderr = errno;
41,668✔
356
  int retry;
41,668✔
357
#if LUAJIT_USE_ASAN_HARDENING
358
  /* Save the request memory size */
359
  size_t msize = size;
360
  /* Total allocation size corresponds to the memory size and the size of redzones */
361
  size = ALIGN_SIZE(size + TOTAL_REDZONE_SIZE, LJ_PAGESIZE);
362
#endif
363
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
41,668✔
364
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
41,668✔
365
    uintptr_t addr = (uintptr_t)p;
41,668✔
366
#if LUAJIT_USE_ASAN_HARDENING
367
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= asan_lower_address() &&
368
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
369
#else
370
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
41,668✔
371
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
41,668✔
372
#endif
373
      /* We got a suitable address. Bump the hint address. */
374
      hint_addr = addr + size;
41,668✔
375
      errno = olderr;
41,668✔
376
#if LUAJIT_USE_ASAN_HARDENING
377
      p = mark_memory_region(p, msize, size);
378
#endif
379
      return p;
41,668✔
380
    }
381
    if (p != MFAIL) {
×
382
      munmap(p, size);
×
383
    } else if (errno == ENOMEM) {
×
384
      return MFAIL;
385
    }
386
    if (hint_addr) {
×
387
      /* First, try linear probing. */
388
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
389
        hint_addr += 0x1000000;
×
390
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
×
391
          hint_addr = 0;
×
392
        continue;
×
393
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
394
        /* Next, try a no-hint probe to get back an ASLR address. */
395
        hint_addr = 0;
×
396
        continue;
×
397
      }
398
    }
399
    /* Finally, try pseudo-random probing. */
400
    if (LJ_UNLIKELY(hint_prng == 0)) {
×
401
      hint_prng = mmap_probe_seed();
×
402
    }
403
    /* The unsuitable address we got has some ASLR PRNG bits. */
404
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
×
405
    do {  /* The PRNG itself is very weak, but see above. */
×
406
      hint_prng = hint_prng * 1103515245 + 12345;
×
407
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
×
408
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
×
409
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
×
410
  }
411
  errno = olderr;
×
412
  return MFAIL;
×
413
}
414

415
#endif
416

417
#if LJ_ALLOC_MMAP32
418

419
#if defined(__sun__)
420
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
421
#else
422
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
423
#endif
424

425
static void *mmap_map32(size_t size)
426
{
427
#if LJ_ALLOC_MMAP_PROBE
428
  static int fallback = 0;
429
  if (fallback)
430
    return mmap_probe(size);
431
#endif
432
  {
433
    int olderr = errno;
434
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
435
    errno = olderr;
436
    /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
437
#if LJ_ALLOC_MMAP_PROBE
438
    if (ptr == MFAIL) {
439
      fallback = 1;
440
      return mmap_probe(size);
441
    }
442
#endif
443
    return ptr;
444
  }
445
}
446

447
#endif
448

449
#if LJ_ALLOC_MMAP32
450
#define CALL_MMAP(size)                mmap_map32(size)
451
#elif LJ_ALLOC_MMAP_PROBE
452
#define CALL_MMAP(size)                mmap_probe(size)
453
#else
454
static void *CALL_MMAP(size_t size)
455
{
456
  int olderr = errno;
457
#if LUAJIT_USE_ASAN_HARDENING
458
  size_t msize = size;
459
  size = ALIGN_SIZE(size + TOTAL_REDZONE_SIZE, LJ_PAGESIZE);
460
#endif
461
#if LUAJIT_USE_ASAN_HARDENING
462
  void *ptr = mmap((void *)asan_lower_address(), size, MMAP_PROT, MMAP_FLAGS, -1, 0);
463
#else
464
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
465
#endif
466
  errno = olderr;
467
#if LUAJIT_USE_ASAN_HARDENING
468
  ptr = mark_memory_region(ptr, msize, size);
469
#endif
470
  return ptr;
471
}
472
#endif
473

474
#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
475

476
#include <sys/resource.h>
477

478
static void init_mmap(void)
479
{
480
  struct rlimit rlim;
481
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
482
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
483
}
484
#define INIT_MMAP()        init_mmap()
485

486
#endif
487

488
static int CALL_MUNMAP(void *ptr, size_t size)
3,563✔
489
{
490
  int olderr = errno;
3,563✔
491
#if LUAJIT_USE_ASAN_HARDENING
492
  /* check that memory is not poisoned */
493
  memmove(ptr, ptr, size);
494
  size = asan_get_size(ptr, POISON_SIZE);
495
  ptr = mem2alloc(ptr);
496
#endif
497
  int ret = munmap(ptr, size);
3,482✔
498
#if LUAJIT_USE_ASAN_HARDENING
499
  if (ret == 0) {
500
    ASAN_POISON_MEMORY_REGION(ptr, size);
501
  }
502
#endif
503
  errno = olderr;
3,563✔
504
  return ret;
3,563✔
505
}
506

507
#if LJ_ALLOC_MREMAP
508
/* Need to define _GNU_SOURCE to get the mremap prototype. */
509
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
192✔
510
{
511
  int olderr = errno;
192✔
512
#if LUAJIT_USE_ASAN_HARDENING && !(LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64))
513
  void *new_ptr = CALL_MMAP(nsz);
514
  if (new_ptr != MFAIL) {
515
    size_t oms = asan_get_size(ptr, MEM_SIZE);
516
    memcpy(new_ptr, ptr, oms > nsz ? nsz : oms);
517
    CALL_MUNMAP(ptr, osz);
518
    ptr = new_ptr;
519
  }
520
#else
521

522
#if LUAJIT_USE_ASAN_HARDENING
523
  void *old_ptr = ptr;
524
  size_t nms = nsz;
525
  osz = asan_get_size(old_ptr, POISON_SIZE);
526
  nsz = ALIGN_SIZE(nsz + TOTAL_REDZONE_SIZE, LJ_PAGESIZE);
527
  ptr = mem2alloc(ptr);
528
#endif
529
  ptr = mremap(ptr, osz, nsz, flags);
384✔
530
#if LUAJIT_USE_ASAN_HARDENING
531
  if (ptr != MFAIL) {
532
    ASAN_POISON_MEMORY_REGION((void *)((char *)(old_ptr) - REDZONE_SIZE), osz);
533
    ptr = mark_memory_region(ptr, nms, nsz);
534
  }
535
#endif
536
#endif
537
  errno = olderr;
192✔
538
  return ptr;
192✔
539
}
540

541
#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
542
#define CALL_MREMAP_NOMOVE        0
543
#define CALL_MREMAP_MAYMOVE        1
544
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
545
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
546
#else
547
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
548
#endif
549
#endif
550

551
#endif
552

553

554
#ifndef INIT_MMAP
555
#define INIT_MMAP()                ((void)0)
556
#endif
557

558
#ifndef DIRECT_MMAP
559
#define DIRECT_MMAP(s)                CALL_MMAP(s)
560
#endif
561

562
#ifndef CALL_MREMAP
563
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
564
#endif
565

566
/* -----------------------  Chunk representations ------------------------ */
567

568
struct malloc_chunk {
569
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
570
  size_t               head;       /* Size and inuse bits. */
571
  struct malloc_chunk *fd;         /* double links -- used only if free. */
572
  struct malloc_chunk *bk;
573
};
574

575
typedef struct malloc_chunk  mchunk;
576
typedef struct malloc_chunk *mchunkptr;
577
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
578
typedef size_t bindex_t;               /* Described below */
579
typedef unsigned int binmap_t;         /* Described below */
580
typedef unsigned int flag_t;           /* The type of various bit flag sets */
581

582
/* ------------------- Chunks sizes and alignments ----------------------- */
583

584
#define MCHUNK_SIZE                (sizeof(mchunk))
585

586
#define CHUNK_OVERHEAD                (SIZE_T_SIZE)
587

588
/* Direct chunks need a second word of overhead ... */
589
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
590
/* ... and additional padding for fake next-chunk at foot */
591
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)
592

593
/* The smallest size we can malloc is an aligned minimal chunk */
594
#define MIN_CHUNK_SIZE\
595
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
596

597
#if LUAJIT_USE_ASAN_HARDENING
598
/* conversion from malloc headers to user pointers, and back */
599
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES + REDZONE_SIZE))
600
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES - REDZONE_SIZE))
601
#else
602
/* conversion from malloc headers to user pointers, and back */
603
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
604
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
605
#endif
606
/* chunk associated with aligned address A */
607
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))
608

609
/* Bounds on request (not chunk) sizes. */
610
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
611
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
612

613
/* pad request bytes into a usable size */
614
#define pad_request(req) \
615
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
616

617
/* pad request, checking for minimum (but not maximum) */
618
#define request2size(req) \
619
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
620

621
/* ------------------ Operations on head and foot fields ----------------- */
622

623
#define PINUSE_BIT                (SIZE_T_ONE)
624
#define CINUSE_BIT                (SIZE_T_TWO)
625
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)
626

627
/* Head value for fenceposts */
628
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)
629

630
/* extraction of fields from head words */
631
#define cinuse(p)                ((p)->head & CINUSE_BIT)
632
#define pinuse(p)                ((p)->head & PINUSE_BIT)
633
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))
634

635
#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
636
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)
637

638
/* Treat space at ptr +/- offset as a chunk */
639
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
640
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))
641

642
/* Ptr to next or previous physical malloc_chunk. */
643
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
644
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
645

646
/* extract next chunk's pinuse bit */
647
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)
648

649
/* Get/set size at footer */
650
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
651
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
652

653
/* Set size, pinuse bit, and foot */
654
#define set_size_and_pinuse_of_free_chunk(p, s)\
655
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
656

657
/* Set size, pinuse bit, foot, and clear next pinuse */
658
#define set_free_with_pinuse(p, s, n)\
659
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
660

661
#define is_direct(p)\
662
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
663

664
/* Get the internal overhead associated with chunk p */
665
#define overhead_for(p)\
666
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
667

668
/* ---------------------- Overlaid data structures ----------------------- */
669

670
struct malloc_tree_chunk {
671
  /* The first four fields must be compatible with malloc_chunk */
672
  size_t                    prev_foot;
673
  size_t                    head;
674
  struct malloc_tree_chunk *fd;
675
  struct malloc_tree_chunk *bk;
676

677
  struct malloc_tree_chunk *child[2];
678
  struct malloc_tree_chunk *parent;
679
  bindex_t                  index;
680
};
681

682
typedef struct malloc_tree_chunk  tchunk;
683
typedef struct malloc_tree_chunk *tchunkptr;
684
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
685

686
/* A little helper macro for trees */
687
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
688

689
/* ----------------------------- Segments -------------------------------- */
690

691
struct malloc_segment {
692
  char        *base;             /* base address */
693
  size_t       size;             /* allocated size */
694
  struct malloc_segment *next;   /* ptr to next segment */
695
};
696

697
typedef struct malloc_segment  msegment;
698
typedef struct malloc_segment *msegmentptr;
699

700
/* ---------------------------- malloc_state ----------------------------- */
701

702
/* Bin types, widths and sizes */
703
#define NSMALLBINS                (32U)
704
#define NTREEBINS                (32U)
705
#define SMALLBIN_SHIFT                (3U)
706
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
707
#define TREEBIN_SHIFT                (8U)
708
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
709
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
710
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
711

712
struct malloc_state {
713
  binmap_t   smallmap;
714
  binmap_t   treemap;
715
  size_t     dvsize;
716
  size_t     topsize;
717
  mchunkptr  dv;
718
  mchunkptr  top;
719
  size_t     trim_check;
720
  size_t     release_checks;
721
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
722
  tbinptr    treebins[NTREEBINS];
723
  msegment   seg;
724
};
725

726
typedef struct malloc_state *mstate;
727

728
#define is_initialized(M)        ((M)->top != 0)
729

730
/* -------------------------- system alloc setup ------------------------- */
731

732
/* page-align a size */
733
#define page_align(S)\
734
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
735

736
/* granularity-align a size */
737
#define granularity_align(S)\
738
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
739
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
740

741
#if LJ_TARGET_WINDOWS
742
#define mmap_align(S)        granularity_align(S)
743
#else
744
#define mmap_align(S)        page_align(S)
745
#endif
746

747
/*  True if segment S holds address A */
748
#define segment_holds(S, A)\
749
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
750

751
/* Return segment holding given address */
752
static msegmentptr segment_holding(mstate m, char *addr)
143✔
753
{
754
  msegmentptr sp = &m->seg;
143✔
755
  for (;;) {
143✔
756
    if (addr >= sp->base && addr < sp->base + sp->size)
143✔
757
      return sp;
758
    if ((sp = sp->next) == 0)
×
759
      return 0;
760
  }
761
}
762

763
/* Return true if segment contains a segment link */
764
static int has_segment_link(mstate m, msegmentptr ss)
23✔
765
{
766
  msegmentptr sp = &m->seg;
23✔
767
  for (;;) {
89✔
768
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
89✔
769
      return 1;
770
    if ((sp = sp->next) == 0)
89✔
771
      return 0;
772
  }
773
}
774

775
/*
776
  TOP_FOOT_SIZE is padding at the end of a segment, including space
777
  that may be needed to place segment records and fenceposts when new
778
  noncontiguous segments are added.
779
*/
780
#define TOP_FOOT_SIZE\
781
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
782

783
/* ---------------------------- Indexing Bins ---------------------------- */
784

785
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
786
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
787
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
788
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
789

790
/* addressing by index. See above about smallbin repositioning */
791
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
792
#define treebin_at(M,i)                (&((M)->treebins[i]))
793

794
/* assign tree index for size S to variable I */
795
#define compute_tree_index(S, I)\
796
{\
797
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
798
  if (X == 0) {\
799
    I = 0;\
800
  } else if (X > 0xFFFF) {\
801
    I = NTREEBINS-1;\
802
  } else {\
803
    unsigned int K = lj_fls(X);\
804
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
805
  }\
806
}
807

808
/* Bit representing maximum resolved size in a treebin at i */
809
#define bit_for_tree_index(i) \
810
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
811

812
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
813
#define leftshift_for_tree_index(i) \
814
   ((i == NTREEBINS-1)? 0 : \
815
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
816

817
/* The size of the smallest chunk held in bin with index i */
818
#define minsize_for_tree_index(i) \
819
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
820
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
821

822
/* ------------------------ Operations on bin maps ----------------------- */
823

824
/* bit corresponding to given index */
825
#define idx2bit(i)                ((binmap_t)(1) << (i))
826

827
/* Mark/Clear bits with given index */
828
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
829
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
830
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
831

832
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
833
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
834
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
835

836
/* mask with all bits to left of least bit of x on */
837
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
838

839
/* Set cinuse bit and pinuse bit of next chunk */
840
#define set_inuse(M,p,s)\
841
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
842
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
843

844
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
845
#define set_inuse_and_pinuse(M,p,s)\
846
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
847
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
848

849
/* Set size, cinuse and pinuse bit of this chunk */
850
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
851
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
852

853
/* ----------------------- Operations on smallbins ----------------------- */
854

855
/* Link a free chunk into a smallbin  */
856
#define insert_small_chunk(M, P, S) {\
857
  bindex_t I = small_index(S);\
858
  mchunkptr B = smallbin_at(M, I);\
859
  mchunkptr F = B;\
860
  if (!smallmap_is_marked(M, I))\
861
    mark_smallmap(M, I);\
862
  else\
863
    F = B->fd;\
864
  B->fd = P;\
865
  F->bk = P;\
866
  P->fd = F;\
867
  P->bk = B;\
868
}
869

870
/* Unlink a chunk from a smallbin  */
871
#define unlink_small_chunk(M, P, S) {\
872
  mchunkptr F = P->fd;\
873
  mchunkptr B = P->bk;\
874
  bindex_t I = small_index(S);\
875
  if (F == B) {\
876
    clear_smallmap(M, I);\
877
  } else {\
878
    F->bk = B;\
879
    B->fd = F;\
880
  }\
881
}
882

883
/* Unlink the first chunk from a smallbin */
884
#define unlink_first_small_chunk(M, B, P, I) {\
885
  mchunkptr F = P->fd;\
886
  if (B == F) {\
887
    clear_smallmap(M, I);\
888
  } else {\
889
    B->fd = F;\
890
    F->bk = B;\
891
  }\
892
}
893

894
/* Replace dv node, binning the old one */
895
/* Used only when dvsize known to be small */
896
#define replace_dv(M, P, S) {\
897
  size_t DVS = M->dvsize;\
898
  if (DVS != 0) {\
899
    mchunkptr DV = M->dv;\
900
    insert_small_chunk(M, DV, DVS);\
901
  }\
902
  M->dvsize = S;\
903
  M->dv = P;\
904
}
905

906
/* ------------------------- Operations on trees ------------------------- */
907

908
/* Insert chunk into tree */
909
#define insert_large_chunk(M, X, S) {\
910
  tbinptr *H;\
911
  bindex_t I;\
912
  compute_tree_index(S, I);\
913
  H = treebin_at(M, I);\
914
  X->index = I;\
915
  X->child[0] = X->child[1] = 0;\
916
  if (!treemap_is_marked(M, I)) {\
917
    mark_treemap(M, I);\
918
    *H = X;\
919
    X->parent = (tchunkptr)H;\
920
    X->fd = X->bk = X;\
921
  } else {\
922
    tchunkptr T = *H;\
923
    size_t K = S << leftshift_for_tree_index(I);\
924
    for (;;) {\
925
      if (chunksize(T) != S) {\
926
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
927
        K <<= 1;\
928
        if (*C != 0) {\
929
          T = *C;\
930
        } else {\
931
          *C = X;\
932
          X->parent = T;\
933
          X->fd = X->bk = X;\
934
          break;\
935
        }\
936
      } else {\
937
        tchunkptr F = T->fd;\
938
        T->fd = F->bk = X;\
939
        X->fd = F;\
940
        X->bk = T;\
941
        X->parent = 0;\
942
        break;\
943
      }\
944
    }\
945
  }\
946
}
947

948
#define unlink_large_chunk(M, X) {\
949
  tchunkptr XP = X->parent;\
950
  tchunkptr R;\
951
  if (X->bk != X) {\
952
    tchunkptr F = X->fd;\
953
    R = X->bk;\
954
    F->bk = R;\
955
    R->fd = F;\
956
  } else {\
957
    tchunkptr *RP;\
958
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
959
        ((R = *(RP = &(X->child[0]))) != 0)) {\
960
      tchunkptr *CP;\
961
      while ((*(CP = &(R->child[1])) != 0) ||\
962
             (*(CP = &(R->child[0])) != 0)) {\
963
        R = *(RP = CP);\
964
      }\
965
      *RP = 0;\
966
    }\
967
  }\
968
  if (XP != 0) {\
969
    tbinptr *H = treebin_at(M, X->index);\
970
    if (X == *H) {\
971
      if ((*H = R) == 0) \
972
        clear_treemap(M, X->index);\
973
    } else {\
974
      if (XP->child[0] == X) \
975
        XP->child[0] = R;\
976
      else \
977
        XP->child[1] = R;\
978
    }\
979
    if (R != 0) {\
980
      tchunkptr C0, C1;\
981
      R->parent = XP;\
982
      if ((C0 = X->child[0]) != 0) {\
983
        R->child[0] = C0;\
984
        C0->parent = R;\
985
      }\
986
      if ((C1 = X->child[1]) != 0) {\
987
        R->child[1] = C1;\
988
        C1->parent = R;\
989
      }\
990
    }\
991
  }\
992
}
993

994
/* Relays to large vs small bin operations */
995

996
#define insert_chunk(M, P, S)\
997
  if (is_small(S)) { insert_small_chunk(M, P, S)\
998
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
999

1000
#define unlink_chunk(M, P, S)\
1001
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
1002
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
1003

1004
/* -----------------------  Direct-mmapping chunks ----------------------- */
1005

1006
static void *direct_alloc(size_t nb)
3,060✔
1007
{
1008
#if LUAJIT_USE_ASAN_HARDENING
1009
  nb += TOTAL_REDZONE_SIZE;
1010
#endif
1011
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3,060✔
1012
#if LUAJIT_USE_ASAN_HARDENING
1013
  mmsize -= TOTAL_REDZONE_SIZE;
1014
#endif
1015
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
3,060✔
1016
    char *mm = (char *)(DIRECT_MMAP(mmsize));
3,060✔
1017
    if (mm != CMFAIL) {
3,060✔
1018
      size_t offset = align_offset(chunk2mem(mm));
3,060✔
1019
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
3,060✔
1020
      mchunkptr p = (mchunkptr)(mm + offset);
3,060✔
1021
      p->prev_foot = offset | IS_DIRECT_BIT;
3,060✔
1022
      p->head = psize|CINUSE_BIT;
3,060✔
1023
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3,060✔
1024
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3,060✔
1025
      return chunk2mem(p);
3,060✔
1026
    }
1027
  }
1028
  return NULL;
1029
}
1030

1031
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
215✔
1032
{
1033
  size_t oldsize = chunksize(oldp);
215✔
1034
  if (is_small(nb)) /* Can't shrink direct regions below small size */
215✔
1035
    return NULL;
1036
  /* Keep old chunk if big enough but not too big */
1037
  if (oldsize >= nb + SIZE_T_SIZE &&
212✔
1038
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
75✔
1039
    return oldp;
1040
  } else {
1041
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
169✔
1042
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
169✔
1043
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
169✔
1044
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
169✔
1045
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
1046
    if (cp != CMFAIL) {
169✔
1047
      mchunkptr newp = (mchunkptr)(cp + offset);
169✔
1048
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
169✔
1049
      newp->head = psize|CINUSE_BIT;
169✔
1050
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
169✔
1051
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
169✔
1052
      return newp;
169✔
1053
    }
1054
  }
1055
  return NULL;
1056
}
1057

1058
/* -------------------------- mspace management -------------------------- */
1059

1060
/* Initialize top chunk and its size */
1061
static void init_top(mstate m, mchunkptr p, size_t psize)
1,496✔
1062
{
1063
  /* Ensure alignment */
1064
  void *t = chunk2mem(p);
1,496✔
1065
#if LUAJIT_USE_ASAN_HARDENING
1066
  t = mem2alloc(t);
1067
#endif
NEW
1068
  size_t offset = align_offset(t);
×
1069

1070
  p = (mchunkptr)((char *)p + offset);
1,496✔
1071
  psize -= offset;
1,496✔
1072

1073
  m->top = p;
1,496✔
1074
  m->topsize = psize;
1,496✔
1075
  p->head = psize | PINUSE_BIT;
1,496✔
1076
  /* set size of fake trailing chunk holding overhead space only once */
1077
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
1,496✔
1078
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
1,496✔
1079
}
983✔
1080

1081
/* Initialize bins for a new mstate that is otherwise zeroed out */
1082
static void init_bins(mstate m)
393✔
1083
{
1084
  /* Establish circular links for smallbins */
1085
  bindex_t i;
393✔
1086
  for (i = 0; i < NSMALLBINS; i++) {
12,969✔
1087
    sbinptr bin = smallbin_at(m,i);
12,576✔
1088
    bin->fd = bin->bk = bin;
12,576✔
1089
  }
1090
}
1091

1092
/* Allocate chunk and prepend remainder with chunk in successor base. */
1093
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
37,135✔
1094
{
1095
  mchunkptr p = align_as_chunk(newbase);
37,135✔
1096
  mchunkptr oldfirst = align_as_chunk(oldbase);
37,135✔
1097
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
37,135✔
1098
  mchunkptr q = chunk_plus_offset(p, nb);
37,135✔
1099
  size_t qsize = psize - nb;
37,135✔
1100
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
37,135✔
1101

1102
  /* consolidate remainder with first chunk of old base */
1103
  if (oldfirst == m->top) {
37,135✔
1104
    size_t tsize = m->topsize += qsize;
×
1105
    m->top = q;
×
1106
    q->head = tsize | PINUSE_BIT;
×
1107
  } else if (oldfirst == m->dv) {
37,135✔
1108
    size_t dsize = m->dvsize += qsize;
×
1109
    m->dv = q;
×
1110
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
1111
  } else {
1112
    if (!cinuse(oldfirst)) {
37,135✔
1113
      size_t nsize = chunksize(oldfirst);
9✔
1114
      unlink_chunk(m, oldfirst, nsize);
9✔
1115
      oldfirst = chunk_plus_offset(oldfirst, nsize);
9✔
1116
      qsize += nsize;
9✔
1117
    }
1118
    set_free_with_pinuse(q, qsize, oldfirst);
37,135✔
1119
    insert_chunk(m, q, qsize);
37,135✔
1120
  }
1121

1122
  return chunk2mem(p);
37,135✔
1123
}
1124

1125
/* Add a segment to hold a new noncontiguous region */
1126
static void add_segment(mstate m, char *tbase, size_t tsize)
120✔
1127
{
1128
  /* Determine locations and sizes of segment, fenceposts, old top */
1129
  char *old_top = (char *)m->top;
120✔
1130
  msegmentptr oldsp = segment_holding(m, old_top);
120✔
1131
#if LUAJIT_USE_ASAN_HARDENING
1132
  ASAN_UNPOISON_MEMORY_REGION(oldsp, sizeof(struct malloc_segment));
1133
#endif
1134
  char *old_end = oldsp->base + oldsp->size;
120✔
1135
  size_t ssize = pad_request(sizeof(struct malloc_segment));
120✔
1136
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
120✔
1137
  size_t offset = align_offset(chunk2mem(rawsp));
120✔
1138
  char *asp = rawsp + offset;
120✔
1139
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
120✔
1140
  mchunkptr sp = (mchunkptr)csp;
120✔
1141
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
120✔
1142
#if LUAJIT_USE_ASAN_HARDENING
1143
  ss = (msegmentptr)(mem2alloc(ss));
1144
#endif
1145
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
120✔
1146
  mchunkptr p = tnext;
120✔
1147

1148
  /* reset top to new space */
1149
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
120✔
1150

1151
  /* Set up segment record */
1152
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
120✔
1153
  *ss = m->seg; /* Push current record */
120✔
1154
  m->seg.base = tbase;
120✔
1155
  m->seg.size = tsize;
120✔
1156
  m->seg.next = ss;
120✔
1157

1158
  /* Insert trailing fenceposts */
1159
  for (;;) {
378✔
1160
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
378✔
1161
    p->head = FENCEPOST_HEAD;
378✔
1162
    if ((char *)(&(nextp->head)) < old_end)
378✔
1163
      p = nextp;
1164
    else
1165
      break;
1166
  }
1167

1168
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1169
  if (csp != old_top) {
120✔
1170
    mchunkptr q = (mchunkptr)old_top;
110✔
1171
    size_t psize = (size_t)(csp - old_top);
110✔
1172
    mchunkptr tn = chunk_plus_offset(q, psize);
110✔
1173
    set_free_with_pinuse(q, psize, tn);
110✔
1174
    insert_chunk(m, q, psize);
114✔
1175
  }
1176
}
120✔
1177

1178
/* -------------------------- System allocation -------------------------- */
1179

1180
static void *alloc_sys(mstate m, size_t nb)
41,275✔
1181
{
1182
  char *tbase = CMFAIL;
41,275✔
1183
  size_t tsize = 0;
41,275✔
1184

1185
  /* Directly map large chunks */
1186
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
41,275✔
1187
    void *mem = direct_alloc(nb);
3,060✔
1188
    if (mem != 0)
3,060✔
1189
      return mem;
1190
  }
1191

1192
  {
1193
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
38,215✔
1194
#if LUAJIT_USE_ASAN_HARDENING
1195
    req += TOTAL_REDZONE_SIZE;
1196
#endif
1197
    size_t rsize = granularity_align(req);
38,215✔
1198
#if LUAJIT_USE_ASAN_HARDENING
1199
    rsize -= TOTAL_REDZONE_SIZE;
1200
#endif
1201
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
38,215✔
1202
      char *mp = (char *)(CALL_MMAP(rsize));
38,215✔
1203
      if (mp != CMFAIL) {
38,215✔
1204
        tbase = mp;
38,215✔
1205
        tsize = rsize;
38,215✔
1206
      }
1207
    }
1208
  }
1209

1210
  if (tbase != CMFAIL) {
38,215✔
1211
    msegmentptr sp = &m->seg;
38,215✔
1212
    /* Try to merge with an existing segment */
1213
    while (sp != 0 && tbase != sp->base + sp->size)
352,582✔
1214
      sp = sp->next;
314,367✔
1215
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
38,215✔
1216
      sp->size += tsize;
960✔
1217
      init_top(m, m->top, m->topsize + tsize);
960✔
1218
    } else {
1219
      sp = &m->seg;
1220
      while (sp != 0 && sp->base != tbase + tsize)
65,812✔
1221
        sp = sp->next;
28,557✔
1222
      if (sp != 0) {
37,255✔
1223
        char *oldbase = sp->base;
37,135✔
1224
        sp->base = tbase;
37,135✔
1225
        sp->size += tsize;
37,135✔
1226
        return prepend_alloc(m, tbase, oldbase, nb);
37,135✔
1227
      } else {
1228
        add_segment(m, tbase, tsize);
120✔
1229
      }
1230
    }
1231

1232
    if (nb < m->topsize) { /* Allocate from new or extended top space */
1,080✔
1233
      size_t rsize = m->topsize -= nb;
1,080✔
1234
      mchunkptr p = m->top;
1,080✔
1235
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
1,080✔
1236
      r->head = rsize | PINUSE_BIT;
1,080✔
1237
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1,080✔
1238
      return chunk2mem(p);
1,080✔
1239
    }
1240
  }
1241

1242
  return NULL;
1243
}
1244

1245
/* -----------------------  system deallocation -------------------------- */
1246

1247
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1248
static size_t release_unused_segments(mstate m)
660,140✔
1249
{
1250
  size_t released = 0;
660,140✔
1251
  size_t nsegs = 0;
660,140✔
1252
  msegmentptr pred = &m->seg;
660,140✔
1253
  msegmentptr sp = pred->next;
660,140✔
1254
  while (sp != 0) {
7,327,871✔
1255
    char *base = sp->base;
6,667,731✔
1256
    size_t size = sp->size;
6,667,731✔
1257
    msegmentptr next = sp->next;
6,667,731✔
1258
    nsegs++;
6,667,731✔
1259
    {
1260
      mchunkptr p = align_as_chunk(base);
6,667,731✔
1261
      size_t psize = chunksize(p);
6,667,731✔
1262
      /* Can unmap if first chunk holds entire segment and not pinned */
1263
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
6,667,731✔
1264
        tchunkptr tp = (tchunkptr)p;
81✔
1265
        if (p == m->dv) {
81✔
1266
          m->dv = 0;
×
1267
          m->dvsize = 0;
×
1268
        } else {
1269
          unlink_large_chunk(m, tp);
84✔
1270
        }
1271
        if (CALL_MUNMAP(base, size) == 0) {
81✔
1272
          released += size;
81✔
1273
          /* unlink obsoleted record */
1274
          sp = pred;
81✔
1275
          sp->next = next;
81✔
1276
        } else { /* back out if cannot unmap */
1277
          insert_large_chunk(m, tp, psize);
×
1278
        }
1279
      }
1280
    }
1281
    pred = sp;
1282
    sp = next;
1283
  }
1284
  /* Reset check counter */
1285
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
660,140✔
1286
                      nsegs : MAX_RELEASE_CHECK_RATE;
660,140✔
1287
  return released;
660,140✔
1288
}
1289

1290
static int alloc_trim(mstate m, size_t pad)
23✔
1291
{
1292
  size_t released = 0;
23✔
1293
  if (pad < MAX_REQUEST && is_initialized(m)) {
23✔
1294
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
23✔
1295

1296
    if (m->topsize > pad) {
23✔
1297
      /* Shrink top space in granularity-size units, keeping at least one */
1298
      size_t unit = DEFAULT_GRANULARITY;
23✔
1299
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
23✔
1300
                      SIZE_T_ONE) * unit;
1301
      msegmentptr sp = segment_holding(m, (char *)m->top);
23✔
1302

1303
      if (sp->size >= extra &&
23✔
1304
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
46✔
1305
        size_t newsize = sp->size - extra;
23✔
1306
        /* Prefer mremap, fall back to munmap */
1307
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
23✔
1308
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
×
1309
          released = extra;
23✔
1310
        }
1311
      }
1312

1313
      if (released != 0) {
23✔
1314
        sp->size -= released;
23✔
1315
        init_top(m, m->top, m->topsize - released);
23✔
1316
      }
1317
    }
1318

1319
    /* Unmap any unused mmapped segments */
1320
    released += release_unused_segments(m);
23✔
1321

1322
    /* On failure, disable autotrim to avoid repeated failed future calls */
1323
    if (released == 0 && m->topsize > m->trim_check)
23✔
1324
      m->trim_check = MAX_SIZE_T;
×
1325
  }
1326

1327
  return (released != 0)? 1 : 0;
23✔
1328
}
1329

1330
/* ---------------------------- malloc support --------------------------- */
1331

1332
/* allocate a large request from the best fitting chunk in a treebin */
1333
static void *tmalloc_large(mstate m, size_t nb)
289,029✔
1334
{
1335
  tchunkptr v = 0;
289,029✔
1336
  size_t rsize = ~nb+1; /* Unsigned negation */
289,029✔
1337
  tchunkptr t;
289,029✔
1338
  bindex_t idx;
289,029✔
1339
  compute_tree_index(nb, idx);
289,029✔
1340

1341
  if ((t = *treebin_at(m, idx)) != 0) {
289,029✔
1342
    /* Traverse tree for this bin looking for node with size == nb */
1343
    size_t sizebits = nb << leftshift_for_tree_index(idx);
122,759✔
1344
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
122,759✔
1345
    for (;;) {
258,523✔
1346
      tchunkptr rt;
190,641✔
1347
      size_t trem = chunksize(t) - nb;
190,641✔
1348
      if (trem < rsize) {
190,641✔
1349
        v = t;
105,047✔
1350
        if ((rsize = trem) == 0)
105,047✔
1351
          break;
1352
      }
1353
      rt = t->child[1];
169,847✔
1354
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
169,847✔
1355
      if (rt != 0 && rt != t)
169,847✔
1356
        rst = rt;
54,515✔
1357
      if (t == 0) {
169,847✔
1358
        t = rst; /* set t to least subtree holding sizes > nb */
1359
        break;
1360
      }
1361
      sizebits <<= 1;
67,882✔
1362
    }
1363
  }
1364

1365
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
289,029✔
1366
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
223,431✔
1367
    if (leftbits != 0)
223,431✔
1368
      t = *treebin_at(m, lj_ffs(leftbits));
211,114✔
1369
  }
1370

1371
  while (t != 0) { /* find smallest of tree or subtree */
647,080✔
1372
    size_t trem = chunksize(t) - nb;
358,051✔
1373
    if (trem < rsize) {
358,051✔
1374
      rsize = trem;
280,753✔
1375
      v = t;
280,753✔
1376
    }
1377
    t = leftmost_child(t);
358,051✔
1378
  }
1379

1380
  /*  If dv is a better fit, return NULL so malloc will use it */
1381
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
289,029✔
1382
    mchunkptr r = chunk_plus_offset(v, nb);
241,655✔
1383
    unlink_large_chunk(m, v);
256,460✔
1384
    if (rsize < MIN_CHUNK_SIZE) {
241,655✔
1385
      set_inuse_and_pinuse(m, v, (rsize + nb));
34,589✔
1386
    } else {
1387
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
207,066✔
1388
      set_size_and_pinuse_of_free_chunk(r, rsize);
207,066✔
1389
      insert_chunk(m, r, rsize);
235,268✔
1390
    }
1391
    return chunk2mem(v);
241,655✔
1392
  }
1393
  return NULL;
1394
}
1395

1396
/* allocate a small request from the best fitting chunk in a treebin */
1397
static void *tmalloc_small(mstate m, size_t nb)
190,718✔
1398
{
1399
  tchunkptr t, v;
190,718✔
1400
  mchunkptr r;
190,718✔
1401
  size_t rsize;
190,718✔
1402
  bindex_t i = lj_ffs(m->treemap);
190,718✔
1403

1404
  v = t = *treebin_at(m, i);
190,718✔
1405
  rsize = chunksize(t) - nb;
190,718✔
1406

1407
  while ((t = leftmost_child(t)) != 0) {
406,706✔
1408
    size_t trem = chunksize(t) - nb;
215,988✔
1409
    if (trem < rsize) {
215,988✔
1410
      rsize = trem;
111,993✔
1411
      v = t;
111,993✔
1412
    }
1413
  }
1414

1415
  r = chunk_plus_offset(v, nb);
190,718✔
1416
  unlink_large_chunk(m, v);
225,528✔
1417
  if (rsize < MIN_CHUNK_SIZE) {
190,718✔
1418
    set_inuse_and_pinuse(m, v, (rsize + nb));
31✔
1419
  } else {
1420
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
190,687✔
1421
    set_size_and_pinuse_of_free_chunk(r, rsize);
190,687✔
1422
    replace_dv(m, r, rsize);
190,687✔
1423
  }
1424
  return chunk2mem(v);
190,718✔
1425
}
1426

1427
/* ----------------------------------------------------------------------- */
1428

1429
void *lj_alloc_create(void)
393✔
1430
{
1431
  size_t tsize = DEFAULT_GRANULARITY;
393✔
1432
#if LUAJIT_USE_ASAN_HARDENING
1433
  tsize -= TOTAL_REDZONE_SIZE;
1434
#endif
1435
  char *tbase;
393✔
1436
  INIT_MMAP();
393✔
1437
  tbase = (char *)(CALL_MMAP(tsize));
393✔
1438
  if (tbase != CMFAIL) {
393✔
1439
    size_t msize = pad_request(sizeof(struct malloc_state));
393✔
1440
    mchunkptr mn;
393✔
1441
#if LUAJIT_USE_ASAN_HARDENING
1442
    mchunkptr msp = (mchunkptr)(tbase + align_offset(mem2alloc(chunk2mem(tbase))));
1443
    mstate m = (mstate)(mem2alloc(chunk2mem(msp)));
1444
#else
1445
    mchunkptr msp = align_as_chunk(tbase);
393✔
1446
    mstate m = (mstate)(chunk2mem(msp));
393✔
1447
#endif
1448
    memset(m, 0, msize);
393✔
1449
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
393✔
1450
    m->seg.base = tbase;
393✔
1451
    m->seg.size = tsize;
393✔
1452
    m->release_checks = MAX_RELEASE_CHECK_RATE;
393✔
1453
    init_bins(m);
393✔
1454
#if LUAJIT_USE_ASAN_HARDENING
1455
    mn = next_chunk((mchunkptr)((char *)(m) - TWO_SIZE_T_SIZES));
1456
#else
1457
    mn = next_chunk(mem2chunk(m));
393✔
1458
#endif
1459
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
393✔
1460
    return m;
393✔
1461
  }
1462
  return NULL;
1463
}
1464

1465
void lj_alloc_destroy(void *msp)
383✔
1466
{
1467
  mstate ms = (mstate)msp;
383✔
1468
  msegmentptr sp = &ms->seg;
383✔
1469
  while (sp != 0) {
805✔
1470
    char *base = sp->base;
422✔
1471
    size_t size = sp->size;
422✔
1472
    sp = sp->next;
422✔
1473
#if LUAJIT_USE_ASAN_HARDENING
1474
    ASAN_UNPOISON_MEMORY_REGION(base, size);
1475
#endif
1476
    CALL_MUNMAP(base, size);
422✔
1477
  }
1478
}
383✔
1479

1480
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
#if LUAJIT_USE_ASAN_HARDENING
  if (nsize == 0)
    nsize = MIN_CHUNK_SIZE;
  size_t mem_size = nsize;
  size_t poison_size = ALIGN_SIZE(nsize, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
  nsize = poison_size;
#endif
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
      mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
        mchunkptr b, p, r;
        size_t rsize;
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
        bindex_t i = lj_ffs(leftbits);
        b = smallbin_at(ms, i);
        p = b->fd;
        unlink_first_small_chunk(ms, b, p, i);
        rsize = small_index2size(i) - nb;
        /* Fit here cannot be remainderless if 4byte sizes */
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
          set_inuse_and_pinuse(ms, p, small_index2size(i));
        } else {
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
          r = chunk_plus_offset(p, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          replace_dv(ms, r, rsize);
        }
        mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
        mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
        return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN_HARDENING
        mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
        return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN_HARDENING
      mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
      return mem;
    }
  }

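  /* No bin fit: carve the request from the designated victim (dv) or the top
  ** chunk, otherwise grow via alloc_sys(). */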
  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else { /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
    mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
    return mem;
  } else if (nb < ms->topsize) { /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN_HARDENING
    mem = mark_memory_region(mem2alloc(mem), mem_size, poison_size);
#endif
    return mem;
  }
#if LUAJIT_USE_ASAN_HARDENING
  return mark_memory_region(mem2alloc(alloc_sys(ms, nb)), mem_size, poison_size);
#else
  return alloc_sys(ms, nb);
#endif
}

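/*
** Free ptr (may be NULL). In the ASAN-hardened build shown here the user
** region is only re-poisoned and the chunk is not linked back into the bins;
** the self-memmove presumably lets ASAN validate the region before it is
** poisoned. The regular build coalesces the chunk with free neighbors,
** merges it into dv/top where possible, or reinserts it into a bin.
*/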
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
#if LUAJIT_USE_ASAN_HARDENING
  if (ptr != 0) {
    size_t mem_size = asan_get_size(ptr, MEM_SIZE);
    size_t poison_size = asan_get_size(ptr, POISON_SIZE);

    memmove(ptr, ptr, mem_size);
    ASAN_POISON_MEMORY_REGION(mem2alloc(ptr), poison_size);
  }
  return NULL;
#else
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
        prevsize &= ~IS_DIRECT_BIT;
        psize += prevsize + DIRECT_FOOT_PAD;
        CALL_MUNMAP((char *)p - prevsize, psize);
        return NULL;
      } else {
        mchunkptr prev = chunk_minus_offset(p, prevsize);
        psize += prevsize;
        p = prev;
        /* consolidate backward */
        if (p != fm->dv) {
          unlink_chunk(fm, p, prevsize);
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
          fm->dvsize = psize;
          set_free_with_pinuse(p, psize, next);
          return NULL;
        }
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
        fm->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == fm->dv) {
          fm->dv = 0;
          fm->dvsize = 0;
        }
        if (tsize > fm->trim_check)
          alloc_trim(fm, 0);
        return NULL;
      } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
        fm->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return NULL;
      } else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(fm, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == fm->dv) {
          fm->dvsize = psize;
          return NULL;
        }
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

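    /* Fully coalesced chunk that was not absorbed by dv/top: put it back into
    ** a smallbin or tree bin and periodically release unused segments. */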
    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
        release_unused_segments(fm);
    }
  }
  return NULL;
#endif
}

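/*
** Reallocate ptr to nsize bytes. The ASAN-hardened build always does
** malloc-copy and then re-poisons the old region. The regular build first
** tries to resize a direct mapping, shrink in place, or grow into the top
** chunk before falling back to malloc-copy-free.
*/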
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
#if LUAJIT_USE_ASAN_HARDENING
  if (nsize >= MAX_REQUEST)
    return NULL;

  mstate m = (mstate)msp;

  size_t mem_size = asan_get_size(ptr, MEM_SIZE);
  size_t poison_size = asan_get_size(ptr, POISON_SIZE);

  void *newmem = lj_alloc_malloc(m, nsize);

  if (newmem == NULL)
    return NULL;

  memcpy(newmem, ptr, nsize > mem_size ? mem_size : nsize);
  ASAN_POISON_MEMORY_REGION(mem2alloc(ptr), poison_size);
  return newmem;
#else
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) { /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr rem = chunk_plus_offset(newp, nb);
        set_inuse(m, newp, nb);
        set_inuse(m, rem, rsize);
        lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize |PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
        lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
#endif
}

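/*
** Allocator callback with the lua_Alloc signature: nsize == 0 frees ptr,
** ptr == NULL allocates nsize bytes, anything else reallocates.
*/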
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}

#endif