
tarantool / luajit / build 9641682936 (github push by mandesero: "disable build without GC64")

24 Jun 2024 07:56AM UTC coverage: 92.594% (+0.01% from 92.581%)

5659 of 6018 branches covered (94.03%). Branch coverage is included in the aggregate %.
21597 of 23418 relevant lines covered (92.22%)
1508334.6 hits per line

Source File

/src/lj_alloc.c (91.13% covered)
1
/*
2
** Bundled memory allocator.
3
**
4
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
** The original bears the following remark:
6
**
7
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
8
**   Doug Lea and released to the public domain, as explained at
9
**   http://creativecommons.org/licenses/publicdomain.
10
**
11
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
12
**
13
** No additional copyright is claimed over the customizations.
14
** Please do NOT bother the original author about this version here!
15
**
16
** If you want to use dlmalloc in another project, you should get
17
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
** For thread-safe derivatives, take a look at:
19
** - ptmalloc: http://www.malloc.de/
20
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
*/
22

23
#define lj_alloc_c
24
#define LUA_CORE
25

26
/* To get the mremap prototype. Must be defined before any system includes. */
27
#if defined(__linux__) && !defined(_GNU_SOURCE)
28
#define _GNU_SOURCE
29
#endif
30

31
#include "lj_def.h"
32
#include "lj_arch.h"
33
#include "lj_alloc.h"
34

35
#ifndef LUAJIT_USE_SYSMALLOC
36

37
#define MAX_SIZE_T                (~(size_t)0)
38
#define MALLOC_ALIGNMENT        ((size_t)8U)
39

40
#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
41
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
43
#define MAX_RELEASE_CHECK_RATE        255
44

45
/* ------------------- size_t and alignment properties -------------------- */
46

47
/* The byte and bit size of a size_t */
48
#define SIZE_T_SIZE                (sizeof(size_t))
49
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)
50

51
/* Some constants coerced to size_t */
52
/* Annoying but necessary to avoid errors on some platforms */
53
#define SIZE_T_ZERO                ((size_t)0)
54
#define SIZE_T_ONE                ((size_t)1)
55
#define SIZE_T_TWO                ((size_t)2)
56
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
57
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
58
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59

60
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)
62

63
/* the number of bytes to offset an address to align it */
64
#define align_offset(A)\
65
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
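
A quick worked example of align_offset() (editor's sketch; MALLOC_ALIGNMENT is 8 here, so CHUNK_ALIGN_MASK is 7):

  align_offset(0x1000) = 0                    /* already 8-byte aligned */
  align_offset(0x1005) = (8 - 5) & 7 = 3      /* 0x1005 + 3 = 0x1008 */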
67

68
/* -------------------------- MMAP support ------------------------------- */
69

70
#define MFAIL                        ((void *)(MAX_SIZE_T))
71
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */
72

73
#define IS_DIRECT_BIT                (SIZE_T_ONE)
74

75

76
/* Determine system-specific block allocation method. */
77
#if LJ_TARGET_WINDOWS
78

79
#define WIN32_LEAN_AND_MEAN
80
#include <windows.h>
81

82
#define LJ_ALLOC_VIRTUALALLOC        1
83

84
#if LJ_64 && !LJ_GC64
85
#define LJ_ALLOC_NTAVM                1
86
#endif
87

88
#else
89

90
#include <errno.h>
91
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92
#include <sys/mman.h>
93

94
#define LJ_ALLOC_MMAP                1
95

96
#if LJ_64
97

98
#define LJ_ALLOC_MMAP_PROBE        1
99

100
#if LJ_GC64
101
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
102
#elif LJ_TARGET_X64 && LJ_HASJIT
103
/* Due to limitations in the x64 compiler backend. */
104
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
105
#else
106
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
107
#endif
108

109
#endif
110

111
#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112
#define LJ_ALLOC_MMAP32                1
113
#endif
114

115
#if LJ_TARGET_LINUX
116
#define LJ_ALLOC_MREMAP                1
117
#endif
118

119
#endif
120

121

122
#if LJ_ALLOC_VIRTUALALLOC
123

124
#if LJ_ALLOC_NTAVM
125
/* Undocumented, but hey, that's what we all love so much about Windows. */
126
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127
                       size_t *size, ULONG alloctype, ULONG prot);
128
static PNTAVM ntavm;
129

130
/* Number of top bits of the lower 32 bits of an address that must be zero.
131
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132
*/
133
#define NTAVM_ZEROBITS                1
134

135
static void init_mmap(void)
136
{
137
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138
                                 "NtAllocateVirtualMemory");
139
}
140
#define INIT_MMAP()        init_mmap()
141

142
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143
static void *CALL_MMAP(size_t size)
144
{
145
  DWORD olderr = GetLastError();
146
  void *ptr = NULL;
147
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149
  SetLastError(olderr);
150
  return st == 0 ? ptr : MFAIL;
151
}
152

153
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154
static void *DIRECT_MMAP(size_t size)
155
{
156
  DWORD olderr = GetLastError();
157
  void *ptr = NULL;
158
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160
  SetLastError(olderr);
161
  return st == 0 ? ptr : MFAIL;
162
}
163

164
#else
165

166
/* Win32 MMAP via VirtualAlloc */
167
static void *CALL_MMAP(size_t size)
168
{
169
  DWORD olderr = GetLastError();
170
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171
  SetLastError(olderr);
172
  return ptr ? ptr : MFAIL;
173
}
174

175
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176
static void *DIRECT_MMAP(size_t size)
177
{
178
  DWORD olderr = GetLastError();
179
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180
                            PAGE_READWRITE);
181
  SetLastError(olderr);
182
  return ptr ? ptr : MFAIL;
183
}
184

185
#endif
186

187
/* This function supports releasing coalesced segments */
188
static int CALL_MUNMAP(void *ptr, size_t size)
189
{
190
  DWORD olderr = GetLastError();
191
  MEMORY_BASIC_INFORMATION minfo;
192
  char *cptr = (char *)ptr;
193
  while (size) {
194
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195
      return -1;
196
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198
      return -1;
199
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200
      return -1;
201
    cptr += minfo.RegionSize;
202
    size -= minfo.RegionSize;
203
  }
204
  SetLastError(olderr);
205
  return 0;
206
}
207

208
#elif LJ_ALLOC_MMAP
209

210
#define MMAP_PROT                (PROT_READ|PROT_WRITE)
211
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212
#define MAP_ANONYMOUS                MAP_ANON
213
#endif
214
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)
215

216
#if LJ_ALLOC_MMAP_PROBE
217

218
#ifdef MAP_TRYFIXED
219
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
220
#else
221
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
222
#endif
223

224
#define LJ_ALLOC_MMAP_PROBE_MAX                30
225
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5
226

227
#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)
228

229
/* No point in a giant ifdef mess. Just try to open /dev/urandom.
230
** It doesn't really matter if this fails, since we get some ASLR bits from
231
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
232
*/
233
#include <fcntl.h>
234
#include <unistd.h>
235

236
#if LUAJIT_USE_ASAN
237

238
/*
239
** ASAN (AddressSanitizer) detects memory errors during program execution.
240
** One way to achieve this is by adding redzones around memory allocations. The redzone is a
241
** specially allocated area of memory before and after the allocated block, which is filled
242
** with a unique value. If the program tries to access memory outside of the allocation,
243
** asan detects this attempt and generates an error message, allowing the developer to
244
** catch and fix the issue early.
245
**
246
** - Original paper: https://www.usenix.org/system/files/conference/atc12/atc12-final39.pdf
247
**
248
** LuaJIT ASAN instrumentation (mmap and others):
249
**
250
** - Memory map around allocation:
251
** -------------------------------------------------------------------------------------
252
** .. .. | [f7]    ...    [f7] | [00]     ...     [0(0-7)] | [f7]    ...    [f7] | .. ..
253
**       |    left redzone     |           data            |    right redzone    |
254
**       |  REDZONE_SIZE bytes |          N bytes          |  REDZONE_SIZE bytes |
255
** ------------------------------------------------------------------------------------- 
256
**
257
** left redzone: 
258
**  The first SIZE_T_SIZE bytes of the redzone contain the data size N, the next SIZE_T_SIZE bytes 
259
**  of the redzone contain the full size of the allocation, including the alignment of the size N 
260
**  and the size of the redzones themselves.
261
*/
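
To make the layout above concrete, here is an editor's sketch of how the helpers defined below are used, assuming a 64-bit build (SIZE_T_SIZE is 8, so REDZONE_SIZE works out to 32 bytes and TOTAL_REDZONE_SIZE to 64). For a request of N = 20 bytes the allocator reserves align_up(20, 8) + 64 = 88 bytes; the names base and p are hypothetical:

  p = mark_memory_region(base, 20, 88);
     /* first two size_t words at base hold 20 (mem_size) and 88 (poison_size) */
     /* poisons [base, base+88), then unpoisons the 20 bytes at [base+32, base+52) */
     /* returns base + 32 */
  asan_get_size(p, MEM_SIZE);     /* briefly unpoisons the first word, returns 20 */
  asan_get_size(p, POISON_SIZE);  /* briefly unpoisons the second word, returns 88 */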
262

263
#include <sanitizer/asan_interface.h>
264

265
/* Recommended redzone size is 16 to 2048 bytes (must be a power of two).
266
** https://github.com/google/sanitizers/wiki/AddressSanitizerFlags
267
*/
268
#define REDZONE_SIZE FOUR_SIZE_T_SIZES
269

270
/* Total redzone size around allocation */
271
#define TOTAL_REDZONE_SIZE (REDZONE_SIZE << 1)
272

273
/* Multiple of the allocated memory size */
274
#define SIZE_ALIGNMENT MALLOC_ALIGNMENT
275

276
/* Multiple of the allocated memory address */
277
#define ADDR_ALIGNMENT MALLOC_ALIGNMENT
278

279
static inline uintptr_t asan_lower_address()
280
{
281
#if LJ_GC64
282
  size_t shadow_scale;
283
  size_t shadow_offset; 
284
  __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
285
  return (uintptr_t)(shadow_offset + (1ULL << (LJ_ALLOC_MBITS - shadow_scale)));
286
#else
287
  return 0x40000000;
288
#endif
289
}
290

291
/* Round a pointer or size up to the nearest multiple of the alignment. */
292
#define align_up_ptr(ptr, alignment)  ((void *)(((uintptr_t)(ptr) + (alignment) - 1) & ~((alignment) - 1)))
293
#define align_up_size(ptr, alignment)  ((size_t)(align_up_ptr(ptr, alignment)))
294

295
void *mark_memory_region(void *ptr, size_t mem_size, size_t poison_size)
296
{
297
  if (ptr == NULL)
298
    return NULL;
299
  size_t *sptr = (size_t *)ptr;
300
  ASAN_UNPOISON_MEMORY_REGION(ptr, TWO_SIZE_T_SIZES);
301
  sptr[0] = mem_size;
302
  sptr[1] = poison_size;
303
  ASAN_POISON_MEMORY_REGION(ptr, poison_size);
304
  ptr += REDZONE_SIZE;
305
  ASAN_UNPOISON_MEMORY_REGION(ptr, mem_size);
306
  return ptr;
307
}
308

309
typedef enum {
310
  MEM_SIZE,
311
  POISON_SIZE
312
} SizeType;
313

314
size_t asan_get_size(void *ptr, SizeType type)
315
{
316
  size_t offset = (type == MEM_SIZE) ? 0 : SIZE_T_SIZE;
317
  ASAN_UNPOISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
318
  size_t size = *((size_t *)(ptr - REDZONE_SIZE + offset));
319
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
320
  return size;
321
}
322

323
#endif
324

325
static uintptr_t mmap_probe_seed(void)
×
326
{
327
  uintptr_t val;
×
328
  int fd = open("/dev/urandom", O_RDONLY);
×
329
  if (fd != -1) {
×
330
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
×
331
    (void)close(fd);
×
332
    if (ok) return val;
×
333
  }
334
  return 1;  /* Punt. */
335
}
336

337
static void *mmap_probe(size_t size)
21,253✔
338
{
339
  /* Hint for next allocation. Doesn't need to be thread-safe. */
340
  static uintptr_t hint_addr = 0;
21,253✔
341
  static uintptr_t hint_prng = 0;
21,253✔
342
  int olderr = errno;
21,253✔
343
  int retry;
21,253✔
344
#if LUAJIT_USE_ASAN
345
  size_t mem_size = size;
346
  size = align_up_size(size, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
347
#endif
348
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
21,253✔
349
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
21,253✔
350
    uintptr_t addr = (uintptr_t)p;
21,253✔
351
#if LUAJIT_USE_ASAN
352
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= asan_lower_address() &&
353
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
354
#else
355
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
21,253✔
356
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
21,253✔
357
#endif
358
      /* We got a suitable address. Bump the hint address. */
359
      hint_addr = addr + size;
21,253✔
360
      errno = olderr;
21,253✔
361
#if LUAJIT_USE_ASAN
362
      p = mark_memory_region(p, mem_size, size);
363
#endif
364
      return p;
21,253✔
365
    }
366
    if (p != MFAIL) {
×
367
      munmap(p, size);
×
368
    } else if (errno == ENOMEM) {
×
369
      return MFAIL;
370
    }
371
    if (hint_addr) {
×
372
      /* First, try linear probing. */
373
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
374
        hint_addr += 0x1000000;
×
375
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
×
376
          hint_addr = 0;
×
377
        continue;
×
378
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
379
        /* Next, try a no-hint probe to get back an ASLR address. */
380
        hint_addr = 0;
×
381
        continue;
×
382
      }
383
    }
384
    /* Finally, try pseudo-random probing. */
385
    if (LJ_UNLIKELY(hint_prng == 0)) {
×
386
      hint_prng = mmap_probe_seed();
×
387
    }
388
    /* The unsuitable address we got has some ASLR PRNG bits. */
389
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
×
390
    do {  /* The PRNG itself is very weak, but see above. */
×
391
      hint_prng = hint_prng * 1103515245 + 12345;
×
392
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
×
393
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
×
394
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
×
395
  }
396
  errno = olderr;
×
397
  return MFAIL;
×
398
}
399

400
#endif
401

402
#if LJ_ALLOC_MMAP32
403

404
#if defined(__sun__)
405
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
406
#else
407
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
408
#endif
409

410
static void *mmap_map32(size_t size)
411
{
412
#if LJ_ALLOC_MMAP_PROBE
413
  static int fallback = 0;
414
  if (fallback)
415
    return mmap_probe(size);
416
#endif
417
  {
418
    int olderr = errno;
419
#if LUAJIT_USE_ASAN
420
    size_t mem_size = size;
421
    size = align_up_size(size, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
422
#endif
423
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
424
#if LUAJIT_USE_ASAN
425
    if (ptr != MFAIL)
426
      ptr = mark_memory_region(ptr, mem_size, size);
427

428
    size = mem_size;
429
#endif
430
    errno = olderr;
431
    /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
432
#if LJ_ALLOC_MMAP_PROBE
433
    if (ptr == MFAIL) {
434
      fallback = 1;
435
      return mmap_probe(size);
436
    }
437
#endif
438
    return ptr;
439
  }
440
}
441

442
#endif
443

444
#if LJ_ALLOC_MMAP32
445
#define CALL_MMAP(size)                mmap_map32(size)
446
#elif LJ_ALLOC_MMAP_PROBE
447
#define CALL_MMAP(size)                mmap_probe(size)
448
#else
449
static void *CALL_MMAP(size_t size)
450
{
451
  int olderr = errno;
452
#if LUAJIT_USE_ASAN
453
  size_t mem_size = size;
454
  size = align_up_size(size, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
455
#endif
456
#if LUAJIT_USE_ASAN
457
  void *ptr = mmap((void *)asan_lower_address(), size, MMAP_PROT, MMAP_FLAGS, -1, 0);
458
#else
459
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
460
#endif
461
  errno = olderr;
462
#if LUAJIT_USE_ASAN
463
  ptr = mark_memory_region(ptr, mem_size, size);
464
#endif
465
  return ptr;
466
}
467
#endif
468

469
#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
470

471
#include <sys/resource.h>
472

473
static void init_mmap(void)
474
{
475
  struct rlimit rlim;
476
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
477
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
478
}
479
#define INIT_MMAP()        init_mmap()
480

481
#endif
482

483
static int CALL_MUNMAP(void *ptr, size_t size)
3,332✔
484
{
485
  int olderr = errno;
3,332✔
486
#if LUAJIT_USE_ASAN
487
  memmove(ptr, ptr, size); /* check that memory is not poisoned */
488
  size = asan_get_size(ptr, POISON_SIZE);
489
  ptr -= REDZONE_SIZE;
490
#endif
491
  int ret = munmap(ptr, size);
3,316✔
492
#if LUAJIT_USE_ASAN
493
  if (ret == 0) {
494
    ASAN_POISON_MEMORY_REGION(ptr, size);
495
  }
496
#endif
497
  errno = olderr;
3,332✔
498
  return ret;
3,332✔
499
}
500

501
#if LJ_ALLOC_MREMAP
502
/* Need to define _GNU_SOURCE to get the mremap prototype. */
503
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
99✔
504
{
505
  int olderr = errno;
99✔
506
#if LUAJIT_USE_ASAN && !(LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64))
507
  void *new_ptr = mmap_probe(nsz);
508
  if (new_ptr != MFAIL) {
509
    size_t oms = asan_get_size(ptr, MEM_SIZE);
510
    memcpy(new_ptr, ptr, oms);
511
    munmap(ptr, osz);
512
    ptr = new_ptr;
513
  }
514
#else
515

516
#if LUAJIT_USE_ASAN
517
  void *old_ptr = ptr;
518
  size_t nms = nsz; /* new memory size */
519
  osz = asan_get_size(old_ptr, POISON_SIZE);
520
  nsz = align_up_size(nsz, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
521
  ptr -= REDZONE_SIZE;
522
#endif
523
  ptr = mremap(ptr, osz, nsz, flags);
198✔
524
#if LUAJIT_USE_ASAN
525
  if (ptr != MFAIL) { 
526
    /* can return a pointer to the same memory */
527
    ASAN_POISON_MEMORY_REGION(old_ptr - REDZONE_SIZE, osz);
528
    ptr = mark_memory_region(ptr, nms, nsz);
529
  }
530
#endif
531
#endif
532
  errno = olderr;
99✔
533
  return ptr;
99✔
534
}
535

536
#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
537
#define CALL_MREMAP_NOMOVE        0
538
#define CALL_MREMAP_MAYMOVE        1
539
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
540
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
541
#else
542
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
543
#endif
544
#endif
545

546
#endif
547

548

549
#ifndef INIT_MMAP
550
#define INIT_MMAP()                ((void)0)
551
#endif
552

553
#ifndef DIRECT_MMAP
554
#define DIRECT_MMAP(s)                CALL_MMAP(s)
555
#endif
556

557
#ifndef CALL_MREMAP
558
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
559
#endif
560

561
/* -----------------------  Chunk representations ------------------------ */
562

563
struct malloc_chunk {
564
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
565
  size_t               head;       /* Size and inuse bits. */
566
  struct malloc_chunk *fd;         /* double links -- used only if free. */
567
  struct malloc_chunk *bk;
568
};
569

570
typedef struct malloc_chunk  mchunk;
571
typedef struct malloc_chunk *mchunkptr;
572
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
573
typedef size_t bindex_t;               /* Described below */
574
typedef unsigned int binmap_t;         /* Described below */
575
typedef unsigned int flag_t;           /* The type of various bit flag sets */
576

577
/* ------------------- Chunks sizes and alignments ----------------------- */
578

579
#define MCHUNK_SIZE                (sizeof(mchunk))
580

581
#define CHUNK_OVERHEAD                (SIZE_T_SIZE)
582

583
/* Direct chunks need a second word of overhead ... */
584
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
585
/* ... and additional padding for fake next-chunk at foot */
586
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)
587

588
/* The smallest size we can malloc is an aligned minimal chunk */
589
#define MIN_CHUNK_SIZE\
590
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
591

592
#if LUAJIT_USE_ASAN
593
/* conversion from malloc headers to user pointers, and back */
594
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES + REDZONE_SIZE))
595
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES - REDZONE_SIZE))
596
#else
597
/* conversion from malloc headers to user pointers, and back */
598
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
599
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
600
#endif
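
On a 64-bit build TWO_SIZE_T_SIZES is 16 and REDZONE_SIZE is 32, so the two variants differ only in the constant offset (editor's note):

  chunk2mem(p):   p + 16        without ASAN
                  p + 16 + 32   with ASAN (also skips the left redzone)
  mem2chunk(mem)  subtracts the same offset to get back to the chunk header.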
601
/* chunk associated with aligned address A */
602
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))
603

604
/* Bounds on request (not chunk) sizes. */
605
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
606
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
607

608
/* pad request bytes into a usable size */
609
#define pad_request(req) \
610
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
611

612
/* pad request, checking for minimum (but not maximum) */
613
#define request2size(req) \
614
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
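
A worked example of the two macros above, assuming a 64-bit build where MCHUNK_SIZE and MIN_CHUNK_SIZE are 32, CHUNK_OVERHEAD is 8 and therefore MIN_REQUEST is 23 (editor's sketch):

  request2size(16)  = MIN_CHUNK_SIZE = 32          /* 16 < MIN_REQUEST */
  request2size(100) = (100 + 8 + 7) & ~7 = 112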
615

616
/* ------------------ Operations on head and foot fields ----------------- */
617

618
#define PINUSE_BIT                (SIZE_T_ONE)
619
#define CINUSE_BIT                (SIZE_T_TWO)
620
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)
621

622
/* Head value for fenceposts */
623
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)
624

625
/* extraction of fields from head words */
626
#define cinuse(p)                ((p)->head & CINUSE_BIT)
627
#define pinuse(p)                ((p)->head & PINUSE_BIT)
628
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))
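
For example (editor's sketch), an in-use chunk of 96 bytes whose predecessor is also in use carries:

  head         = 96 | PINUSE_BIT | CINUSE_BIT = 0x63
  chunksize(p) = 0x63 & ~INUSE_BITS = 96
  cinuse(p)    = 0x63 & CINUSE_BIT  = 2   /* nonzero: this chunk is in use */
  pinuse(p)    = 0x63 & PINUSE_BIT  = 1   /* nonzero: previous chunk is in use */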
629

630
#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
631
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)
632

633
/* Treat space at ptr +/- offset as a chunk */
634
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
635
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))
636

637
/* Ptr to next or previous physical malloc_chunk. */
638
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
639
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
640

641
/* extract next chunk's pinuse bit */
642
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)
643

644
/* Get/set size at footer */
645
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
646
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
647

648
/* Set size, pinuse bit, and foot */
649
#define set_size_and_pinuse_of_free_chunk(p, s)\
650
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
651

652
/* Set size, pinuse bit, foot, and clear next pinuse */
653
#define set_free_with_pinuse(p, s, n)\
654
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
655

656
#define is_direct(p)\
657
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
658

659
/* Get the internal overhead associated with chunk p */
660
#define overhead_for(p)\
661
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
662

663
/* ---------------------- Overlaid data structures ----------------------- */
664

665
struct malloc_tree_chunk {
666
  /* The first four fields must be compatible with malloc_chunk */
667
  size_t                    prev_foot;
668
  size_t                    head;
669
  struct malloc_tree_chunk *fd;
670
  struct malloc_tree_chunk *bk;
671

672
  struct malloc_tree_chunk *child[2];
673
  struct malloc_tree_chunk *parent;
674
  bindex_t                  index;
675
};
676

677
typedef struct malloc_tree_chunk  tchunk;
678
typedef struct malloc_tree_chunk *tchunkptr;
679
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
680

681
/* A little helper macro for trees */
682
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
683

684
/* ----------------------------- Segments -------------------------------- */
685

686
struct malloc_segment {
687
  char        *base;             /* base address */
688
  size_t       size;             /* allocated size */
689
  struct malloc_segment *next;   /* ptr to next segment */
690
};
691

692
typedef struct malloc_segment  msegment;
693
typedef struct malloc_segment *msegmentptr;
694

695
/* ---------------------------- malloc_state ----------------------------- */
696

697
/* Bin types, widths and sizes */
698
#define NSMALLBINS                (32U)
699
#define NTREEBINS                (32U)
700
#define SMALLBIN_SHIFT                (3U)
701
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
702
#define TREEBIN_SHIFT                (8U)
703
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
704
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
705
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
706

707
struct malloc_state {
708
  binmap_t   smallmap;
709
  binmap_t   treemap;
710
  size_t     dvsize;
711
  size_t     topsize;
712
  mchunkptr  dv;
713
  mchunkptr  top;
714
  size_t     trim_check;
715
  size_t     release_checks;
716
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
717
  tbinptr    treebins[NTREEBINS];
718
  msegment   seg;
719
};
720

721
typedef struct malloc_state *mstate;
722

723
#define is_initialized(M)        ((M)->top != 0)
724

725
/* -------------------------- system alloc setup ------------------------- */
726

727
/* page-align a size */
728
#define page_align(S)\
729
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
730

731
/* granularity-align a size */
732
#define granularity_align(S)\
733
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
734
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
735

736
#if LJ_TARGET_WINDOWS
737
#define mmap_align(S)        granularity_align(S)
738
#else
739
#define mmap_align(S)        page_align(S)
740
#endif
741

742
/*  True if segment S holds address A */
743
#define segment_holds(S, A)\
744
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
745

746
/* Return segment holding given address */
747
static msegmentptr segment_holding(mstate m, char *addr)
74✔
748
{
749
  msegmentptr sp = &m->seg;
74✔
750
  for (;;) {
74✔
751
    if (addr >= sp->base && addr < sp->base + sp->size)
74✔
752
      return sp;
753
    if ((sp = sp->next) == 0)
×
754
      return 0;
755
  }
756
}
757

758
/* Return true if segment contains a segment link */
759
static int has_segment_link(mstate m, msegmentptr ss)
21✔
760
{
761
  msegmentptr sp = &m->seg;
21✔
762
  for (;;) {
61✔
763
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
61✔
764
      return 1;
765
    if ((sp = sp->next) == 0)
61✔
766
      return 0;
767
  }
768
}
769

770
/*
771
  TOP_FOOT_SIZE is padding at the end of a segment, including space
772
  that may be needed to place segment records and fenceposts when new
773
  noncontiguous segments are added.
774
*/
775
#define TOP_FOOT_SIZE\
776
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
777

778
/* ---------------------------- Indexing Bins ---------------------------- */
779

780
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
781
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
782
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
783
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
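
In other words (editor's note), with SMALLBIN_SHIFT = 3 every chunk smaller than 256 bytes counts as small:

  is_small(s)         : true for s < 256
  small_index(64)     = 64 >> 3 = 8    /* a 64-byte chunk lives in smallbin 8 */
  small_index2size(8) = 8 << 3  = 64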
784

785
/* addressing by index. See above about smallbin repositioning */
786
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
787
#define treebin_at(M,i)                (&((M)->treebins[i]))
788

789
/* assign tree index for size S to variable I */
790
#define compute_tree_index(S, I)\
791
{\
792
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
793
  if (X == 0) {\
794
    I = 0;\
795
  } else if (X > 0xFFFF) {\
796
    I = NTREEBINS-1;\
797
  } else {\
798
    unsigned int K = lj_fls(X);\
799
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
800
  }\
801
}
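
Two worked instances of the macro above (editor's sketch; lj_fls() is assumed to return the bit index of the highest set bit):

  S = 256:  X = 256 >> 8 = 1, K = 0, I = (0 << 1) + ((256 >> 7) & 1) = 0
  S = 768:  X = 768 >> 8 = 3, K = 1, I = (1 << 1) + ((768 >> 8) & 1) = 3

So 256, the smallest "large" size, lands in treebin 0, while 768, the upper half of the 512..1023 range, lands in treebin 3.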
802

803
/* Bit representing maximum resolved size in a treebin at i */
804
#define bit_for_tree_index(i) \
805
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
806

807
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
808
#define leftshift_for_tree_index(i) \
809
   ((i == NTREEBINS-1)? 0 : \
810
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
811

812
/* The size of the smallest chunk held in bin with index i */
813
#define minsize_for_tree_index(i) \
814
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
815
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
816

817
/* ------------------------ Operations on bin maps ----------------------- */
818

819
/* bit corresponding to given index */
820
#define idx2bit(i)                ((binmap_t)(1) << (i))
821

822
/* Mark/Clear bits with given index */
823
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
824
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
825
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
826

827
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
828
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
829
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
830

831
/* mask with all bits to left of least bit of x on */
832
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
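
For example (editor's sketch), with the least set bit of x at position 2:

  x             = 0b00100
  x << 1        = 0b01000
  ~(x << 1) + 1 = ...11000   /* two's-complement negation */
  left_bits(x)  = ...11000   /* every bit strictly above bit 2 */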
833

834
/* Set cinuse bit and pinuse bit of next chunk */
835
#define set_inuse(M,p,s)\
836
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
837
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
838

839
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
840
#define set_inuse_and_pinuse(M,p,s)\
841
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
842
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
843

844
/* Set size, cinuse and pinuse bit of this chunk */
845
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
846
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
847

848
/* ----------------------- Operations on smallbins ----------------------- */
849

850
/* Link a free chunk into a smallbin  */
851
#define insert_small_chunk(M, P, S) {\
852
  bindex_t I = small_index(S);\
853
  mchunkptr B = smallbin_at(M, I);\
854
  mchunkptr F = B;\
855
  if (!smallmap_is_marked(M, I))\
856
    mark_smallmap(M, I);\
857
  else\
858
    F = B->fd;\
859
  B->fd = P;\
860
  F->bk = P;\
861
  P->fd = F;\
862
  P->bk = B;\
863
}
864

865
/* Unlink a chunk from a smallbin  */
866
#define unlink_small_chunk(M, P, S) {\
867
  mchunkptr F = P->fd;\
868
  mchunkptr B = P->bk;\
869
  bindex_t I = small_index(S);\
870
  if (F == B) {\
871
    clear_smallmap(M, I);\
872
  } else {\
873
    F->bk = B;\
874
    B->fd = F;\
875
  }\
876
}
877

878
/* Unlink the first chunk from a smallbin */
879
#define unlink_first_small_chunk(M, B, P, I) {\
880
  mchunkptr F = P->fd;\
881
  if (B == F) {\
882
    clear_smallmap(M, I);\
883
  } else {\
884
    B->fd = F;\
885
    F->bk = B;\
886
  }\
887
}
888

889
/* Replace dv node, binning the old one */
890
/* Used only when dvsize known to be small */
891
#define replace_dv(M, P, S) {\
892
  size_t DVS = M->dvsize;\
893
  if (DVS != 0) {\
894
    mchunkptr DV = M->dv;\
895
    insert_small_chunk(M, DV, DVS);\
896
  }\
897
  M->dvsize = S;\
898
  M->dv = P;\
899
}
900

901
/* ------------------------- Operations on trees ------------------------- */
902

903
/* Insert chunk into tree */
904
#define insert_large_chunk(M, X, S) {\
905
  tbinptr *H;\
906
  bindex_t I;\
907
  compute_tree_index(S, I);\
908
  H = treebin_at(M, I);\
909
  X->index = I;\
910
  X->child[0] = X->child[1] = 0;\
911
  if (!treemap_is_marked(M, I)) {\
912
    mark_treemap(M, I);\
913
    *H = X;\
914
    X->parent = (tchunkptr)H;\
915
    X->fd = X->bk = X;\
916
  } else {\
917
    tchunkptr T = *H;\
918
    size_t K = S << leftshift_for_tree_index(I);\
919
    for (;;) {\
920
      if (chunksize(T) != S) {\
921
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
922
        K <<= 1;\
923
        if (*C != 0) {\
924
          T = *C;\
925
        } else {\
926
          *C = X;\
927
          X->parent = T;\
928
          X->fd = X->bk = X;\
929
          break;\
930
        }\
931
      } else {\
932
        tchunkptr F = T->fd;\
933
        T->fd = F->bk = X;\
934
        X->fd = F;\
935
        X->bk = T;\
936
        X->parent = 0;\
937
        break;\
938
      }\
939
    }\
940
  }\
941
}
942

943
#define unlink_large_chunk(M, X) {\
944
  tchunkptr XP = X->parent;\
945
  tchunkptr R;\
946
  if (X->bk != X) {\
947
    tchunkptr F = X->fd;\
948
    R = X->bk;\
949
    F->bk = R;\
950
    R->fd = F;\
951
  } else {\
952
    tchunkptr *RP;\
953
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
954
        ((R = *(RP = &(X->child[0]))) != 0)) {\
955
      tchunkptr *CP;\
956
      while ((*(CP = &(R->child[1])) != 0) ||\
957
             (*(CP = &(R->child[0])) != 0)) {\
958
        R = *(RP = CP);\
959
      }\
960
      *RP = 0;\
961
    }\
962
  }\
963
  if (XP != 0) {\
964
    tbinptr *H = treebin_at(M, X->index);\
965
    if (X == *H) {\
966
      if ((*H = R) == 0) \
967
        clear_treemap(M, X->index);\
968
    } else {\
969
      if (XP->child[0] == X) \
970
        XP->child[0] = R;\
971
      else \
972
        XP->child[1] = R;\
973
    }\
974
    if (R != 0) {\
975
      tchunkptr C0, C1;\
976
      R->parent = XP;\
977
      if ((C0 = X->child[0]) != 0) {\
978
        R->child[0] = C0;\
979
        C0->parent = R;\
980
      }\
981
      if ((C1 = X->child[1]) != 0) {\
982
        R->child[1] = C1;\
983
        C1->parent = R;\
984
      }\
985
    }\
986
  }\
987
}
988

989
/* Relays to large vs small bin operations */
990

991
#define insert_chunk(M, P, S)\
992
  if (is_small(S)) { insert_small_chunk(M, P, S)\
993
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
994

995
#define unlink_chunk(M, P, S)\
996
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
997
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
998

999
/* -----------------------  Direct-mmapping chunks ----------------------- */
1000

1001
static void *direct_alloc(size_t nb)
2,964✔
1002
{
1003
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2,964✔
1004
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
2,964✔
1005
    char *mm = (char *)(DIRECT_MMAP(mmsize));
2,964✔
1006
    if (mm != CMFAIL) {
2,964✔
1007
      size_t offset = align_offset(chunk2mem(mm));
2,964✔
1008
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
2,964✔
1009
      mchunkptr p = (mchunkptr)(mm + offset);
2,964✔
1010
      p->prev_foot = offset | IS_DIRECT_BIT;
2,964✔
1011
      p->head = psize|CINUSE_BIT;
2,964✔
1012
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2,964✔
1013
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2,964✔
1014
      return chunk2mem(p);
2,964✔
1015
    }
1016
  }
1017
  return NULL;
1018
}
1019

1020
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
112✔
1021
{
1022
  size_t oldsize = chunksize(oldp);
112✔
1023
  if (is_small(nb)) /* Can't shrink direct regions below small size */
112✔
1024
    return NULL;
1025
  /* Keep old chunk if big enough but not too big */
1026
  if (oldsize >= nb + SIZE_T_SIZE &&
109✔
1027
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
61✔
1028
    return oldp;
1029
  } else {
1030
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
78✔
1031
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
78✔
1032
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
78✔
1033
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
78✔
1034
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
1035
    if (cp != CMFAIL) {
78✔
1036
      mchunkptr newp = (mchunkptr)(cp + offset);
78✔
1037
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
78✔
1038
      newp->head = psize|CINUSE_BIT;
78✔
1039
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
78✔
1040
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
78✔
1041
      return newp;
78✔
1042
    }
1043
  }
1044
  return NULL;
1045
}
1046

1047
/* -------------------------- mspace management -------------------------- */
1048

1049
/* Initialize top chunk and its size */
1050
static void init_top(mstate m, mchunkptr p, size_t psize)
461✔
1051
{
1052
  /* Ensure alignment */
1053
  void *t = chunk2mem(p);
461✔
1054
#if LUAJIT_USE_ASAN
1055
  t -= REDZONE_SIZE;
1056
#endif
1057
  size_t offset = align_offset(t);
×
1058

1059
  p = (mchunkptr)((char *)p + offset);
461✔
1060
  psize -= offset;
461✔
1061

1062
  m->top = p;
461✔
1063
  m->topsize = psize;
461✔
1064
  p->head = psize | PINUSE_BIT;
461✔
1065
  /* set size of fake trailing chunk holding overhead space only once */
1066
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
461✔
1067
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
461✔
1068
}
83✔
1069

1070
/* Initialize bins for a new mstate that is otherwise zeroed out */
1071
static void init_bins(mstate m)
325✔
1072
{
1073
  /* Establish circular links for smallbins */
1074
  bindex_t i;
325✔
1075
  for (i = 0; i < NSMALLBINS; i++) {
10,725✔
1076
    sbinptr bin = smallbin_at(m,i);
10,400✔
1077
    bin->fd = bin->bk = bin;
10,400✔
1078
  }
1079
}
1080

1081
/* Allocate chunk and prepend remainder with chunk in successor base. */
1082
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
17,849✔
1083
{
1084
  mchunkptr p = align_as_chunk(newbase);
17,849✔
1085
  mchunkptr oldfirst = align_as_chunk(oldbase);
17,849✔
1086
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
17,849✔
1087
  mchunkptr q = chunk_plus_offset(p, nb);
17,849✔
1088
  size_t qsize = psize - nb;
17,849✔
1089
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
17,849✔
1090

1091
  /* consolidate remainder with first chunk of old base */
1092
  if (oldfirst == m->top) {
17,849✔
1093
    size_t tsize = m->topsize += qsize;
×
1094
    m->top = q;
×
1095
    q->head = tsize | PINUSE_BIT;
×
1096
  } else if (oldfirst == m->dv) {
17,849✔
1097
    size_t dsize = m->dvsize += qsize;
×
1098
    m->dv = q;
×
1099
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
1100
  } else {
1101
    if (!cinuse(oldfirst)) {
17,849✔
1102
      size_t nsize = chunksize(oldfirst);
8✔
1103
      unlink_chunk(m, oldfirst, nsize);
10✔
1104
      oldfirst = chunk_plus_offset(oldfirst, nsize);
8✔
1105
      qsize += nsize;
8✔
1106
    }
1107
    set_free_with_pinuse(q, qsize, oldfirst);
17,849✔
1108
    insert_chunk(m, q, qsize);
17,849✔
1109
  }
1110

1111
  return chunk2mem(p);
17,849✔
1112
}
1113

1114
/* Add a segment to hold a new noncontiguous region */
1115
static void add_segment(mstate m, char *tbase, size_t tsize)
53✔
1116
{
1117
  /* Determine locations and sizes of segment, fenceposts, old top */
1118
  char *old_top = (char *)m->top;
53✔
1119
  msegmentptr oldsp = segment_holding(m, old_top);
53✔
1120
#if LUAJIT_USE_ASAN
1121
  ASAN_UNPOISON_MEMORY_REGION(oldsp, sizeof(struct malloc_segment));
1122
#endif
1123
  char *old_end = oldsp->base + oldsp->size;
53✔
1124
  size_t ssize = pad_request(sizeof(struct malloc_segment));
53✔
1125
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
53✔
1126
  size_t offset = align_offset(chunk2mem(rawsp));
53✔
1127
  char *asp = rawsp + offset;
53✔
1128
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
53✔
1129
  mchunkptr sp = (mchunkptr)csp;
53✔
1130
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
53✔
1131
#if LUAJIT_USE_ASAN
1132
  ss = (msegmentptr)((void *)ss - REDZONE_SIZE);
1133
#endif
1134
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
53✔
1135
  mchunkptr p = tnext;
53✔
1136

1137
  /* reset top to new space */
1138
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
53✔
1139

1140
  /* Set up segment record */
1141
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
53✔
1142
  *ss = m->seg; /* Push current record */
53✔
1143
  m->seg.base = tbase;
53✔
1144
  m->seg.size = tsize;
53✔
1145
  m->seg.next = ss;
53✔
1146

1147
  /* Insert trailing fenceposts */
1148
  for (;;) {
181✔
1149
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
181✔
1150
    p->head = FENCEPOST_HEAD;
181✔
1151
    if ((char *)(&(nextp->head)) < old_end)
181✔
1152
      p = nextp;
1153
    else
1154
      break;
1155
  }
1156

1157
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1158
  if (csp != old_top) {
53✔
1159
    mchunkptr q = (mchunkptr)old_top;
41✔
1160
    size_t psize = (size_t)(csp - old_top);
41✔
1161
    mchunkptr tn = chunk_plus_offset(q, psize);
41✔
1162
    set_free_with_pinuse(q, psize, tn);
41✔
1163
    insert_chunk(m, q, psize);
59✔
1164
  }
1165
}
53✔
1166

1167
/* -------------------------- System allocation -------------------------- */
1168

1169
static void *alloc_sys(mstate m, size_t nb)
20,928✔
1170
{
1171
  char *tbase = CMFAIL;
20,928✔
1172
  size_t tsize = 0;
20,928✔
1173

1174
  /* Directly map large chunks */
1175
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
20,928✔
1176
    void *mem = direct_alloc(nb);
2,964✔
1177
    if (mem != 0)
2,964✔
1178
      return mem;
1179
  }
1180

1181
  {
1182
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
17,964✔
1183
    size_t rsize = granularity_align(req);
17,964✔
1184
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
17,964✔
1185
      char *mp = (char *)(CALL_MMAP(rsize));
17,964✔
1186
      if (mp != CMFAIL) {
17,964✔
1187
        tbase = mp;
17,964✔
1188
        tsize = rsize;
17,964✔
1189
      }
1190
    }
1191
  }
1192

1193
  if (tbase != CMFAIL) {
17,964✔
1194
    msegmentptr sp = &m->seg;
17,964✔
1195
    /* Try to merge with an existing segment */
1196
    while (sp != 0 && tbase != sp->base + sp->size)
238,093✔
1197
      sp = sp->next;
220,129✔
1198
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
17,964✔
1199
      sp->size += tsize;
62✔
1200
      init_top(m, m->top, m->topsize + tsize);
62✔
1201
    } else {
1202
      sp = &m->seg;
1203
      while (sp != 0 && sp->base != tbase + tsize)
30,525✔
1204
        sp = sp->next;
12,623✔
1205
      if (sp != 0) {
17,902✔
1206
        char *oldbase = sp->base;
17,849✔
1207
        sp->base = tbase;
17,849✔
1208
        sp->size += tsize;
17,849✔
1209
        return prepend_alloc(m, tbase, oldbase, nb);
17,849✔
1210
      } else {
1211
        add_segment(m, tbase, tsize);
53✔
1212
      }
1213
    }
1214

1215
    if (nb < m->topsize) { /* Allocate from new or extended top space */
115✔
1216
      size_t rsize = m->topsize -= nb;
115✔
1217
      mchunkptr p = m->top;
115✔
1218
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
115✔
1219
      r->head = rsize | PINUSE_BIT;
115✔
1220
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
115✔
1221
      return chunk2mem(p);
115✔
1222
    }
1223
  }
1224

1225
  return NULL;
1226
}
1227

1228
/* -----------------------  system deallocation -------------------------- */
1229

1230
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1231
static size_t release_unused_segments(mstate m)
279,239✔
1232
{
1233
  size_t released = 0;
279,239✔
1234
  size_t nsegs = 0;
279,239✔
1235
  msegmentptr pred = &m->seg;
279,239✔
1236
  msegmentptr sp = pred->next;
279,239✔
1237
  while (sp != 0) {
3,754,836✔
1238
    char *base = sp->base;
3,475,597✔
1239
    size_t size = sp->size;
3,475,597✔
1240
    msegmentptr next = sp->next;
3,475,597✔
1241
    nsegs++;
3,475,597✔
1242
    {
1243
      mchunkptr p = align_as_chunk(base);
3,475,597✔
1244
      size_t psize = chunksize(p);
3,475,597✔
1245
      /* Can unmap if first chunk holds entire segment and not pinned */
1246
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
3,475,597✔
1247
        tchunkptr tp = (tchunkptr)p;
16✔
1248
        if (p == m->dv) {
16✔
1249
          m->dv = 0;
×
1250
          m->dvsize = 0;
×
1251
        } else {
1252
          unlink_large_chunk(m, tp);
17✔
1253
        }
1254
        if (CALL_MUNMAP(base, size) == 0) {
16✔
1255
          released += size;
16✔
1256
          /* unlink obsoleted record */
1257
          sp = pred;
16✔
1258
          sp->next = next;
16✔
1259
        } else { /* back out if cannot unmap */
1260
          insert_large_chunk(m, tp, psize);
×
1261
        }
1262
      }
1263
    }
1264
    pred = sp;
1265
    sp = next;
1266
  }
1267
  /* Reset check counter */
1268
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
279,239✔
1269
                      nsegs : MAX_RELEASE_CHECK_RATE;
279,239✔
1270
  return released;
279,239✔
1271
}
1272

1273
static int alloc_trim(mstate m, size_t pad)
21✔
1274
{
1275
  size_t released = 0;
21✔
1276
  if (pad < MAX_REQUEST && is_initialized(m)) {
21✔
1277
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
21✔
1278

1279
    if (m->topsize > pad) {
21✔
1280
      /* Shrink top space in granularity-size units, keeping at least one */
1281
      size_t unit = DEFAULT_GRANULARITY;
21✔
1282
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
21✔
1283
                      SIZE_T_ONE) * unit;
1284
      msegmentptr sp = segment_holding(m, (char *)m->top);
21✔
1285

1286
      if (sp->size >= extra &&
21✔
1287
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
42✔
1288
        size_t newsize = sp->size - extra;
21✔
1289
        /* Prefer mremap, fall back to munmap */
1290
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
21✔
1291
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
×
1292
          released = extra;
21✔
1293
        }
1294
      }
1295

1296
      if (released != 0) {
21✔
1297
        sp->size -= released;
21✔
1298
        init_top(m, m->top, m->topsize - released);
21✔
1299
      }
1300
    }
1301

1302
    /* Unmap any unused mmapped segments */
1303
    released += release_unused_segments(m);
21✔
1304

1305
    /* On failure, disable autotrim to avoid repeated failed future calls */
1306
    if (released == 0 && m->topsize > m->trim_check)
21✔
1307
      m->trim_check = MAX_SIZE_T;
×
1308
  }
1309

1310
  return (released != 0)? 1 : 0;
21✔
1311
}
1312

1313
/* ---------------------------- malloc support --------------------------- */
1314

1315
/* allocate a large request from the best fitting chunk in a treebin */
1316
static void *tmalloc_large(mstate m, size_t nb)
286,462✔
1317
{
1318
  tchunkptr v = 0;
286,462✔
1319
  size_t rsize = ~nb+1; /* Unsigned negation */
286,462✔
1320
  tchunkptr t;
286,462✔
1321
  bindex_t idx;
286,462✔
1322
  compute_tree_index(nb, idx);
286,462✔
1323

1324
  if ((t = *treebin_at(m, idx)) != 0) {
286,462✔
1325
    /* Traverse tree for this bin looking for node with size == nb */
1326
    size_t sizebits = nb << leftshift_for_tree_index(idx);
112,284✔
1327
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
112,284✔
1328
    for (;;) {
249,316✔
1329
      tchunkptr rt;
180,800✔
1330
      size_t trem = chunksize(t) - nb;
180,800✔
1331
      if (trem < rsize) {
180,800✔
1332
        v = t;
107,381✔
1333
        if ((rsize = trem) == 0)
107,381✔
1334
          break;
1335
      }
1336
      rt = t->child[1];
158,312✔
1337
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
158,312✔
1338
      if (rt != 0 && rt != t)
158,312✔
1339
        rst = rt;
54,114✔
1340
      if (t == 0) {
158,312✔
1341
        t = rst; /* set t to least subtree holding sizes > nb */
1342
        break;
1343
      }
1344
      sizebits <<= 1;
68,516✔
1345
    }
1346
  }
1347

1348
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
286,462✔
1349
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
220,762✔
1350
    if (leftbits != 0)
220,762✔
1351
      t = *treebin_at(m, lj_ffs(leftbits));
209,638✔
1352
  }
1353

1354
  while (t != 0) { /* find smallest of tree or subtree */
648,974✔
1355
    size_t trem = chunksize(t) - nb;
362,512✔
1356
    if (trem < rsize) {
362,512✔
1357
      rsize = trem;
280,867✔
1358
      v = t;
280,867✔
1359
    }
1360
    t = leftmost_child(t);
362,512✔
1361
  }
1362

1363
  /*  If dv is a better fit, return NULL so malloc will use it */
1364
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
286,462✔
1365
    mchunkptr r = chunk_plus_offset(v, nb);
234,793✔
1366
    unlink_large_chunk(m, v);
249,156✔
1367
    if (rsize < MIN_CHUNK_SIZE) {
234,793✔
1368
      set_inuse_and_pinuse(m, v, (rsize + nb));
35,382✔
1369
    } else {
1370
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
199,411✔
1371
      set_size_and_pinuse_of_free_chunk(r, rsize);
199,411✔
1372
      insert_chunk(m, r, rsize);
225,791✔
1373
    }
1374
    return chunk2mem(v);
234,793✔
1375
  }
1376
  return NULL;
1377
}
1378

1379
/* allocate a small request from the best fitting chunk in a treebin */
1380
static void *tmalloc_small(mstate m, size_t nb)
153,988✔
1381
{
1382
  tchunkptr t, v;
153,988✔
1383
  mchunkptr r;
153,988✔
1384
  size_t rsize;
153,988✔
1385
  bindex_t i = lj_ffs(m->treemap);
153,988✔
1386

1387
  v = t = *treebin_at(m, i);
153,988✔
1388
  rsize = chunksize(t) - nb;
153,988✔
1389

1390
  while ((t = leftmost_child(t)) != 0) {
350,118✔
1391
    size_t trem = chunksize(t) - nb;
196,130✔
1392
    if (trem < rsize) {
196,130✔
1393
      rsize = trem;
101,213✔
1394
      v = t;
101,213✔
1395
    }
1396
  }
1397

1398
  r = chunk_plus_offset(v, nb);
153,988✔
1399
  unlink_large_chunk(m, v);
190,419✔
1400
  if (rsize < MIN_CHUNK_SIZE) {
153,988✔
1401
    set_inuse_and_pinuse(m, v, (rsize + nb));
29✔
1402
  } else {
1403
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
153,959✔
1404
    set_size_and_pinuse_of_free_chunk(r, rsize);
153,959✔
1405
    replace_dv(m, r, rsize);
153,959✔
1406
  }
1407
  return chunk2mem(v);
153,988✔
1408
}
1409

1410
/* ----------------------------------------------------------------------- */
1411

1412
void *lj_alloc_create(void)
325✔
1413
{
1414
  size_t tsize = DEFAULT_GRANULARITY;
325✔
1415
#if LUAJIT_USE_ASAN
1416
  tsize -= TOTAL_REDZONE_SIZE;
1417
#endif
1418
  char *tbase;
325✔
1419
  INIT_MMAP();
325✔
1420
  tbase = (char *)(CALL_MMAP(tsize));
325✔
1421
  if (tbase != CMFAIL) {
325✔
1422
    size_t msize = pad_request(sizeof(struct malloc_state));
325✔
1423
    mchunkptr mn;
325✔
1424
#if LUAJIT_USE_ASAN
1425
    mchunkptr msp = (mchunkptr)(tbase + align_offset(chunk2mem(tbase) - REDZONE_SIZE));
1426
    mstate m = (mstate)(chunk2mem(msp) - REDZONE_SIZE);
1427
#else
1428
    mchunkptr msp = align_as_chunk(tbase);
325✔
1429
    mstate m = (mstate)(chunk2mem(msp));
325✔
1430
#endif
1431
    memset(m, 0, msize);
325✔
1432
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
325✔
1433
    m->seg.base = tbase;
325✔
1434
    m->seg.size = tsize;
325✔
1435
    m->release_checks = MAX_RELEASE_CHECK_RATE;
325✔
1436
    init_bins(m);
325✔
1437
#if LUAJIT_USE_ASAN
1438
    mn = next_chunk((mchunkptr)((char *)(m) - TWO_SIZE_T_SIZES));
1439
#else
1440
    mn = next_chunk(mem2chunk(m));
325✔
1441
#endif
1442
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
325✔
1443
    return m;
325✔
1444
  }
1445
  return NULL;
1446
}
1447

1448
void lj_alloc_destroy(void *msp)
315✔
1449
{
1450
  mstate ms = (mstate)msp;
315✔
1451
  msegmentptr sp = &ms->seg;
315✔
1452
  while (sp != 0) {
667✔
1453
    char *base = sp->base;
352✔
1454
    size_t size = sp->size;
352✔
1455
    sp = sp->next;
352✔
1456
#if LUAJIT_USE_ASAN
1457
    ASAN_UNPOISON_MEMORY_REGION(base, size);
1458
#endif
1459
    CALL_MUNMAP(base, size);
352✔
1460
  }
1461
}
315✔
1462

1463
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
76,810,153✔
1464
{
1465
#if LUAJIT_USE_ASAN
1466
  size_t mem_size = nsize;
1467
  size_t poison_size = align_up_size(nsize, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
1468
  nsize = poison_size;
1469
#endif
1470
  mstate ms = (mstate)msp;
76,810,153✔
1471
  void *mem;
76,810,153✔
1472
  size_t nb;
76,810,153✔
1473
  if (nsize <= MAX_SMALL_REQUEST) {
76,810,153✔
1474
    bindex_t idx;
76,488,996✔
1475
    binmap_t smallbits;
76,488,996✔
1476
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
      mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
        mchunkptr b, p, r;
        size_t rsize;
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
        bindex_t i = lj_ffs(leftbits);
        b = smallbin_at(ms, i);
        p = b->fd;
        unlink_first_small_chunk(ms, b, p, i);
        rsize = small_index2size(i) - nb;
        /* Fit here cannot be remainderless if 4byte sizes */
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
          set_inuse_and_pinuse(ms, p, small_index2size(i));
        } else {
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
          r = chunk_plus_offset(p, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          replace_dv(ms, r, rsize);
        }
        mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
        mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
        return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN
        mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
        return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN
      mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else { /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
    mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
    return mem;
  } else if (nb < ms->topsize) { /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
    mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
    return mem;
  }
#if LUAJIT_USE_ASAN
  return mark_memory_region(alloc_sys(ms, nb) - REDZONE_SIZE, mem_size, poison_size);
#else
  return alloc_sys(ms, nb);
#endif
}

static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
#if LUAJIT_USE_ASAN
  if (ptr != 0) {
    size_t mem_size = asan_get_size(ptr, MEM_SIZE);
    size_t poison_size = asan_get_size(ptr, POISON_SIZE);

    /* Self-memmove over the user region: the ASAN memmove interceptor
    ** checks that the region is still addressable, so a double free is
    ** reported here before the region is poisoned below.
    */
    memmove(ptr, ptr, mem_size);
    ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE, poison_size);
  }
  return NULL;
#else
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
        prevsize &= ~IS_DIRECT_BIT;
        psize += prevsize + DIRECT_FOOT_PAD;
        CALL_MUNMAP((char *)p - prevsize, psize);
        return NULL;
      } else {
        mchunkptr prev = chunk_minus_offset(p, prevsize);
        psize += prevsize;
        p = prev;
        /* consolidate backward */
        if (p != fm->dv) {
          unlink_chunk(fm, p, prevsize);
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
          fm->dvsize = psize;
          set_free_with_pinuse(p, psize, next);
          return NULL;
        }
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
        fm->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == fm->dv) {
          fm->dv = 0;
          fm->dvsize = 0;
        }
        if (tsize > fm->trim_check)
          alloc_trim(fm, 0);
        return NULL;
      } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
        fm->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return NULL;
      } else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(fm, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == fm->dv) {
          fm->dvsize = psize;
          return NULL;
        }
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
        release_unused_segments(fm);
    }
  }
  return NULL;
#endif
}

static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
#if LUAJIT_USE_ASAN
  if (nsize >= MAX_REQUEST)
    return NULL;

  mstate m = (mstate)msp;

  size_t mem_size = asan_get_size(ptr, MEM_SIZE);
  size_t poison_size = asan_get_size(ptr, POISON_SIZE);

  void *newmem = lj_alloc_malloc(m, nsize);

  if (newmem == NULL)
    return NULL;

  memcpy(newmem, ptr, nsize > mem_size ? mem_size : nsize);
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE, poison_size);
  return newmem;
#else
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) { /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr rem = chunk_plus_offset(newp, nb);
        set_inuse(m, newp, nb);
        set_inuse(m, rem, rsize);
        lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize | PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
        lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
#endif
}

void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}

#endif
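
/* Illustrative sketch (not part of the build): how a caller would exercise
** the lj_alloc_f() entry point above. It follows the lua_Alloc convention
** visible in the function: nsize == 0 frees, ptr == NULL allocates, and any
** other combination reallocates. The allocator state pointer is assumed to
** come from lj_alloc_create() (its exact signature varies across LuaJIT
** versions); the function name and sizes below are purely illustrative.
*/
#if 0
static void alloc_f_usage_sketch(void *msp)
{
  void *p = lj_alloc_f(msp, NULL, 0, 64);  /* ptr == NULL: allocate 64 bytes */
  p = lj_alloc_f(msp, p, 64, 256);         /* resize: in place or malloc-copy-free */
  lj_alloc_f(msp, p, 256, 0);              /* nsize == 0: free, returns NULL */
}
#endif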