
tarantool / luajit, build 9515514390 (push, via GitHub)
14 Jun 2024 11:40AM UTC. Coverage: 92.608% (first build).
mandesero: Add ASAN Instrumentation for lj_alloc_(malloc/free/realloc) with Corresponding Tests

5660 of 6018 branches covered (94.05%); branch coverage is included in the aggregate %.
1 of 2 new or added lines in 1 file covered (50.0%).
21600 of 23418 relevant lines covered (92.24%), 2963910.25 hits per line.
Source file: /src/lj_alloc.c (file coverage: 91.13%)

1
/*
2
** Bundled memory allocator.
3
**
4
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
** The original bears the following remark:
6
**
7
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
8
**   Doug Lea and released to the public domain, as explained at
9
**   http://creativecommons.org/licenses/publicdomain.
10
**
11
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
12
**
13
** No additional copyright is claimed over the customizations.
14
** Please do NOT bother the original author about this version here!
15
**
16
** If you want to use dlmalloc in another project, you should get
17
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
** For thread-safe derivatives, take a look at:
19
** - ptmalloc: http://www.malloc.de/
20
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
*/
22

23
#define lj_alloc_c
24
#define LUA_CORE
25

26
/* To get the mremap prototype. Must be defined before any system includes. */
27
#if defined(__linux__) && !defined(_GNU_SOURCE)
28
#define _GNU_SOURCE
29
#endif
30

31
#include "lj_def.h"
32
#include "lj_arch.h"
33
#include "lj_alloc.h"
34

35
#ifndef LUAJIT_USE_SYSMALLOC
36

37
#define MAX_SIZE_T                (~(size_t)0)
38
#define MALLOC_ALIGNMENT        ((size_t)8U)
39

40
#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
41
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
43
#define MAX_RELEASE_CHECK_RATE        255
44

45
/* ------------------- size_t and alignment properties -------------------- */
46

47
/* The byte and bit size of a size_t */
48
#define SIZE_T_SIZE                (sizeof(size_t))
49
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)
50

51
/* Some constants coerced to size_t */
52
/* Annoying but necessary to avoid errors on some platforms */
53
#define SIZE_T_ZERO                ((size_t)0)
54
#define SIZE_T_ONE                ((size_t)1)
55
#define SIZE_T_TWO                ((size_t)2)
56
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
57
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
58
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59

60
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)
62

63
/* the number of bytes to offset an address to align it */
64
#define align_offset(A)\
65
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
67

68
/* -------------------------- MMAP support ------------------------------- */
69

70
#define MFAIL                        ((void *)(MAX_SIZE_T))
71
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */
72

73
#define IS_DIRECT_BIT                (SIZE_T_ONE)
74

75

76
/* Determine system-specific block allocation method. */
77
#if LJ_TARGET_WINDOWS
78

79
#define WIN32_LEAN_AND_MEAN
80
#include <windows.h>
81

82
#define LJ_ALLOC_VIRTUALALLOC        1
83

84
#if LJ_64 && !LJ_GC64
85
#define LJ_ALLOC_NTAVM                1
86
#endif
87

88
#else
89

90
#include <errno.h>
91
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92
#include <sys/mman.h>
93

94
#define LJ_ALLOC_MMAP                1
95

96
#if LJ_64
97

98
#define LJ_ALLOC_MMAP_PROBE        1
99

100
#if LJ_GC64
101
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
102
#elif LJ_TARGET_X64 && LJ_HASJIT
103
/* Due to limitations in the x64 compiler backend. */
104
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
105
#else
106
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
107
#endif
108

109
#endif
110

111
#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112
#define LJ_ALLOC_MMAP32                1
113
#endif
114

115
#if LJ_TARGET_LINUX
116
#define LJ_ALLOC_MREMAP                1
117
#endif
118

119
#endif
120

121

122
#if LJ_ALLOC_VIRTUALALLOC
123

124
#if LJ_ALLOC_NTAVM
125
/* Undocumented, but hey, that's what we all love so much about Windows. */
126
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127
                       size_t *size, ULONG alloctype, ULONG prot);
128
static PNTAVM ntavm;
129

130
/* Number of top bits of the lower 32 bits of an address that must be zero.
131
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132
*/
133
#define NTAVM_ZEROBITS                1
134

135
static void init_mmap(void)
136
{
137
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138
                                 "NtAllocateVirtualMemory");
139
}
140
#define INIT_MMAP()        init_mmap()
141

142
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143
static void *CALL_MMAP(size_t size)
144
{
145
  DWORD olderr = GetLastError();
146
  void *ptr = NULL;
147
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149
  SetLastError(olderr);
150
  return st == 0 ? ptr : MFAIL;
151
}
152

153
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154
static void *DIRECT_MMAP(size_t size)
155
{
156
  DWORD olderr = GetLastError();
157
  void *ptr = NULL;
158
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160
  SetLastError(olderr);
161
  return st == 0 ? ptr : MFAIL;
162
}
163

164
#else
165

166
/* Win32 MMAP via VirtualAlloc */
167
static void *CALL_MMAP(size_t size)
168
{
169
  DWORD olderr = GetLastError();
170
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171
  SetLastError(olderr);
172
  return ptr ? ptr : MFAIL;
173
}
174

175
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176
static void *DIRECT_MMAP(size_t size)
177
{
178
  DWORD olderr = GetLastError();
179
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180
                            PAGE_READWRITE);
181
  SetLastError(olderr);
182
  return ptr ? ptr : MFAIL;
183
}
184

185
#endif
186

187
/* This function supports releasing coalesced segments */
188
static int CALL_MUNMAP(void *ptr, size_t size)
189
{
190
  DWORD olderr = GetLastError();
191
  MEMORY_BASIC_INFORMATION minfo;
192
  char *cptr = (char *)ptr;
193
  while (size) {
194
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195
      return -1;
196
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198
      return -1;
199
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200
      return -1;
201
    cptr += minfo.RegionSize;
202
    size -= minfo.RegionSize;
203
  }
204
  SetLastError(olderr);
205
  return 0;
206
}
207

208
#elif LJ_ALLOC_MMAP
209

210
#define MMAP_PROT                (PROT_READ|PROT_WRITE)
211
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212
#define MAP_ANONYMOUS                MAP_ANON
213
#endif
214
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)
215

216
#if LJ_ALLOC_MMAP_PROBE
217

218
#ifdef MAP_TRYFIXED
219
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
220
#else
221
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
222
#endif
223

224
#define LJ_ALLOC_MMAP_PROBE_MAX                30
225
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5
226

227
#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)
228

229
/* No point in a giant ifdef mess. Just try to open /dev/urandom.
230
** It doesn't really matter if this fails, since we get some ASLR bits from
231
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
232
*/
233
#include <fcntl.h>
234
#include <unistd.h>
235

236
#if LUAJIT_USE_ASAN
237

238
/*
239
** ASAN (AddressSanitizer) detects memory errors during program execution.
240
** One way to achieve this is by adding redzones around memory allocations. The redzone is a
241
** specially allocated area of memory before and after the allocated block, which is filled
242
** with a unique value. If the program tries to access memory outside of the allocation,
243
** ASAN detects the attempt and reports an error, allowing the developer to
244
** detect and fix the issue early.
245
**
246
** - Original paper: https://www.usenix.org/system/files/conference/atc12/atc12-final39.pdf
247
**
248
** LuaJIT ASAN instrumentation (mmap and others):
249
**
250
** - Memory map around allocation:
251
** -------------------------------------------------------------------------------------
252
** .. .. | [f7]    ...    [f7] | [00]     ...     [0(0-7)] | [f7]    ...    [f7] | .. ..
253
**       |    left redzone     |           data            |    right redzone    |
254
**       |  REDZONE_SIZE bytes |          N bytes          |  REDZONE_SIZE bytes |
255
** ------------------------------------------------------------------------------------- 
256
**
257
** left redzone: 
258
**  The first SIZE_T_SIZE bytes of the left redzone hold the data size N; the next
259
**  SIZE_T_SIZE bytes hold the full size of the allocation, i.e. N rounded up for
260
**  alignment plus the size of both redzones.
261
*/
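/*
** Worked example (illustrative only, assuming a 64-bit build where sizeof(size_t) == 8,
** so REDZONE_SIZE below is 32 bytes and TOTAL_REDZONE_SIZE is 64 bytes): for a request
** of N = 100 bytes, the instrumented footprint is align_up(100, 8) + 64 = 168 bytes:
**   bytes [0, 32)    left redzone; [0, 8) holds N = 100, [8, 16) holds the full size 168
**   bytes [32, 132)  user data (unpoisoned); the caller receives base + 32
**   bytes [132, 168) alignment slack plus the right redzone (poisoned)
*/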
262

263
#include <sanitizer/asan_interface.h>
264

265
/* Recommended redzone size: 16 to 2048 bytes (must be a power of two).
266
** https://github.com/google/sanitizers/wiki/AddressSanitizerFlags
267
*/
268
#define REDZONE_SIZE FOUR_SIZE_T_SIZES
269

270
/* Total redzone size around allocation */
271
#define TOTAL_REDZONE_SIZE (REDZONE_SIZE << 1)
272

273
/* The allocated size is rounded up to a multiple of this */
274
#define SIZE_ALIGNMENT MALLOC_ALIGNMENT
275

276
/* The allocated address is a multiple of this */
277
#define ADDR_ALIGNMENT MALLOC_ALIGNMENT
278

279
/* Round a pointer up to the nearest multiple of the given alignment */
280
void *align_up(void *ptr, size_t alignment)
281
{
282
  uintptr_t p = (uintptr_t)ptr;
283
  return (void *)((p + alignment - 1) & ~(alignment - 1));
284
}
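/* A minimal illustration (not in the original source): with alignment == 8,
** align_up((void *)0x1003, 8) returns (void *)0x1008, while an already aligned
** address such as (void *)0x1008 is returned unchanged.
*/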
285

286
void *mark_memory_region(void *ptr, size_t mem_size, size_t poison_size)
287
{
288
  if (ptr == NULL)
289
    return NULL;
290
  size_t *sptr = (size_t *)ptr;
291
  ASAN_UNPOISON_MEMORY_REGION(ptr, TWO_SIZE_T_SIZES);
292
  sptr[0] = mem_size;
293
  sptr[1] = poison_size;
294
  ASAN_POISON_MEMORY_REGION(ptr, poison_size);
295
  ptr += REDZONE_SIZE;
296
  ASAN_UNPOISON_MEMORY_REGION(ptr, mem_size);
297
  return ptr;
298
}
299

300
typedef enum {
301
  MEM_SIZE,
302
  POISON_SIZE
303
} SizeType;
304

305
size_t asan_get_size(void *ptr, SizeType type)
306
{
307
  size_t offset = (type == MEM_SIZE) ? 0 : SIZE_T_SIZE;
308
  ASAN_UNPOISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
309
  size_t size = *((size_t *)(ptr - REDZONE_SIZE + offset));
310
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
311
  return size;
312
}
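/* Note (added for clarity): asan_get_size() reads back the two size_t values that
** mark_memory_region() stored at the start of the left redzone. MEM_SIZE yields the
** caller-requested size, POISON_SIZE the full footprint (aligned size plus both
** redzones); the word is unpoisoned only for the duration of the read.
*/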
313

314
#endif
315

316
static uintptr_t mmap_probe_seed(void)
×
317
{
318
  uintptr_t val;
×
319
  int fd = open("/dev/urandom", O_RDONLY);
×
320
  if (fd != -1) {
×
321
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
×
322
    (void)close(fd);
×
323
    if (ok) return val;
×
324
  }
325
  return 1;  /* Punt. */
326
}
327

328
static void *mmap_probe(size_t size)
37,709✔
329
{
330
  /* Hint for next allocation. Doesn't need to be thread-safe. */
331
  static uintptr_t hint_addr = 0;
37,709✔
332
  static uintptr_t hint_prng = 0;
37,709✔
333
  int olderr = errno;
37,709✔
334
  int retry;
37,709✔
335
#if LUAJIT_USE_ASAN
336
  size_t mem_size = size;
337
  size = (size_t)align_up((void *)size, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
338
#endif
339
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
37,709✔
340
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
37,709✔
341
    uintptr_t addr = (uintptr_t)p;
37,709✔
342
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
37,709✔
343
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
37,709✔
344
      /* We got a suitable address. Bump the hint address. */
345
      hint_addr = addr + size;
37,709✔
346
      errno = olderr;
37,709✔
347
#if LUAJIT_USE_ASAN
348
      p = mark_memory_region(p, mem_size, size);
349
#endif
350
      return p;
37,709✔
351
    }
352
    if (p != MFAIL) {
×
353
      munmap(p, size);
×
354
    } else if (errno == ENOMEM) {
×
355
      return MFAIL;
356
    }
357
    if (hint_addr) {
×
358
      /* First, try linear probing. */
359
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
360
        hint_addr += 0x1000000;
×
361
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
×
362
          hint_addr = 0;
×
363
        continue;
×
364
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
365
        /* Next, try a no-hint probe to get back an ASLR address. */
366
        hint_addr = 0;
×
367
        continue;
×
368
      }
369
    }
370
    /* Finally, try pseudo-random probing. */
371
    if (LJ_UNLIKELY(hint_prng == 0)) {
×
372
      hint_prng = mmap_probe_seed();
×
373
    }
374
    /* The unsuitable address we got has some ASLR PRNG bits. */
375
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
×
376
    do {  /* The PRNG itself is very weak, but see above. */
×
377
      hint_prng = hint_prng * 1103515245 + 12345;
×
378
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
×
379
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
×
380
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
×
381
  }
382
  errno = olderr;
×
383
  return MFAIL;
×
384
}
385

386
#endif
387

388
#if LJ_ALLOC_MMAP32
389

390
#if defined(__sun__)
391
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
392
#else
393
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
394
#endif
395

396
static void *mmap_map32(size_t size)
397
{
398
#if LJ_ALLOC_MMAP_PROBE
399
  static int fallback = 0;
400
  if (fallback)
401
    return mmap_probe(size);
402
#endif
403
  {
404
    int olderr = errno;
405
#if LUAJIT_USE_ASAN
406
    size_t mem_size = size;
407
    size = (size_t)align_up((void *)size, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
408
#endif
409
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
410
#if LUAJIT_USE_ASAN
411
    if (ptr != MFAIL)
412
      ptr = mark_memory_region(ptr, mem_size, size);
413

414
    size = mem_size;
415
#endif
416
    errno = olderr;
417
    /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
418
#if LJ_ALLOC_MMAP_PROBE
419
    if (ptr == MFAIL) {
420
      fallback = 1;
421
      return mmap_probe(size);
422
    }
423
#endif
424
    return ptr;
425
  }
426
}
427

428
#endif
429

430
#if LJ_ALLOC_MMAP32
431
#define CALL_MMAP(size)                mmap_map32(size)
432
#elif LJ_ALLOC_MMAP_PROBE
433
#define CALL_MMAP(size)                mmap_probe(size)
434
#else
435
static void *CALL_MMAP(size_t size)
436
{
437
  int olderr = errno;
438
#if LUAJIT_USE_ASAN
439
  size_t mem_size = size;
440
  size = (size_t)align_up((void *)size, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
441
#endif
442
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
443
  errno = olderr;
444
#if LUAJIT_USE_ASAN
445
  ptr = mark_memory_region(ptr, mem_size, size);
446
#endif
447
  return ptr;
448
}
449
#endif
450

451
#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
452

453
#include <sys/resource.h>
454

455
static void init_mmap(void)
456
{
457
  struct rlimit rlim;
458
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
459
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
460
}
461
#define INIT_MMAP()        init_mmap()
462

463
#endif
464

465
static int CALL_MUNMAP(void *ptr, size_t size)
3,366✔
466
{
467
  int olderr = errno;
3,366✔
468
#if LUAJIT_USE_ASAN
469
  memmove(ptr, ptr, size); /* check that memory is not poisoned */
470
  size = asan_get_size(ptr, POISON_SIZE);
471
  ptr -= REDZONE_SIZE;
472
#endif
473
  int ret = munmap(ptr, size);
3,351✔
474
#if LUAJIT_USE_ASAN
475
  if (ret == 0) {
476
    ASAN_POISON_MEMORY_REGION(ptr, size);
477
  }
478
#endif
479
  errno = olderr;
3,366✔
480
  return ret;
3,366✔
481
}
482

483
#if LJ_ALLOC_MREMAP
484
/* Need to define _GNU_SOURCE to get the mremap prototype. */
485
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
113✔
486
{
487
  int olderr = errno;
113✔
488
#if LUAJIT_USE_ASAN
489
  void *old_ptr = ptr;
490
  size_t nms = nsz; /* new memory size */
491
  osz = asan_get_size(old_ptr, POISON_SIZE);
492
  nsz = (size_t)align_up((void *)nsz, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
493
  ptr -= REDZONE_SIZE;
494
#endif
495
  ptr = mremap(ptr, osz, nsz, flags);
226✔
496
#if LUAJIT_USE_ASAN
497
  if (ptr != MFAIL) { 
498
    /* can return a pointer to the same memory */
499
    ASAN_POISON_MEMORY_REGION(old_ptr, osz);
500
    ptr = mark_memory_region(ptr, nms, nsz);
501
  }
502
#endif
503
  errno = olderr;
113✔
504
  return ptr;
113✔
505
}
506

507
#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
508
#define CALL_MREMAP_NOMOVE        0
509
#define CALL_MREMAP_MAYMOVE        1
510
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
511
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
512
#else
513
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
514
#endif
515
#endif
516

517
#endif
518

519

520
#ifndef INIT_MMAP
521
#define INIT_MMAP()                ((void)0)
522
#endif
523

524
#ifndef DIRECT_MMAP
525
#define DIRECT_MMAP(s)                CALL_MMAP(s)
526
#endif
527

528
#ifndef CALL_MREMAP
529
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
530
#endif
531

532
/* -----------------------  Chunk representations ------------------------ */
533

534
struct malloc_chunk {
535
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
536
  size_t               head;       /* Size and inuse bits. */
537
  struct malloc_chunk *fd;         /* double links -- used only if free. */
538
  struct malloc_chunk *bk;
539
};
540

541
typedef struct malloc_chunk  mchunk;
542
typedef struct malloc_chunk *mchunkptr;
543
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
544
typedef size_t bindex_t;               /* Described below */
545
typedef unsigned int binmap_t;         /* Described below */
546
typedef unsigned int flag_t;           /* The type of various bit flag sets */
547

548
/* ------------------- Chunks sizes and alignments ----------------------- */
549

550
#define MCHUNK_SIZE                (sizeof(mchunk))
551

552
#define CHUNK_OVERHEAD                (SIZE_T_SIZE)
553

554
/* Direct chunks need a second word of overhead ... */
555
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
556
/* ... and additional padding for fake next-chunk at foot */
557
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)
558

559
/* The smallest size we can malloc is an aligned minimal chunk */
560
#define MIN_CHUNK_SIZE\
561
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
562

563
#if LUAJIT_USE_ASAN
564
/* conversion from malloc headers to user pointers, and back */
565
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES + REDZONE_SIZE))
566
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES - REDZONE_SIZE))
567
#else
568
/* conversion from malloc headers to user pointers, and back */
569
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
570
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
571
#endif
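/* Note (added for clarity): under ASAN the user pointer is pushed REDZONE_SIZE bytes
** past the chunk header, so the left redzone sits between the header and the user data.
** This is why the allocation paths below call mark_memory_region(mem - REDZONE_SIZE, ...):
** the two size words land at the start of that redzone, right after the chunk header.
*/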
572
/* chunk associated with aligned address A */
573
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))
574

575
/* Bounds on request (not chunk) sizes. */
576
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
577
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
578

579
/* pad request bytes into a usable size */
580
#define pad_request(req) \
581
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
582

583
/* pad request, checking for minimum (but not maximum) */
584
#define request2size(req) \
585
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
586

587
/* ------------------ Operations on head and foot fields ----------------- */
588

589
#define PINUSE_BIT                (SIZE_T_ONE)
590
#define CINUSE_BIT                (SIZE_T_TWO)
591
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)
592

593
/* Head value for fenceposts */
594
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)
595

596
/* extraction of fields from head words */
597
#define cinuse(p)                ((p)->head & CINUSE_BIT)
598
#define pinuse(p)                ((p)->head & PINUSE_BIT)
599
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))
600

601
#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
602
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)
603

604
/* Treat space at ptr +/- offset as a chunk */
605
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
606
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))
607

608
/* Ptr to next or previous physical malloc_chunk. */
609
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
610
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
611

612
/* extract next chunk's pinuse bit */
613
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)
614

615
/* Get/set size at footer */
616
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
617
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
618

619
/* Set size, pinuse bit, and foot */
620
#define set_size_and_pinuse_of_free_chunk(p, s)\
621
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
622

623
/* Set size, pinuse bit, foot, and clear next pinuse */
624
#define set_free_with_pinuse(p, s, n)\
625
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
626

627
#define is_direct(p)\
628
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
629

630
/* Get the internal overhead associated with chunk p */
631
#define overhead_for(p)\
632
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
633

634
/* ---------------------- Overlaid data structures ----------------------- */
635

636
struct malloc_tree_chunk {
637
  /* The first four fields must be compatible with malloc_chunk */
638
  size_t                    prev_foot;
639
  size_t                    head;
640
  struct malloc_tree_chunk *fd;
641
  struct malloc_tree_chunk *bk;
642

643
  struct malloc_tree_chunk *child[2];
644
  struct malloc_tree_chunk *parent;
645
  bindex_t                  index;
646
};
647

648
typedef struct malloc_tree_chunk  tchunk;
649
typedef struct malloc_tree_chunk *tchunkptr;
650
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
651

652
/* A little helper macro for trees */
653
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
654

655
/* ----------------------------- Segments -------------------------------- */
656

657
struct malloc_segment {
658
  char        *base;             /* base address */
659
  size_t       size;             /* allocated size */
660
  struct malloc_segment *next;   /* ptr to next segment */
661
};
662

663
typedef struct malloc_segment  msegment;
664
typedef struct malloc_segment *msegmentptr;
665

666
/* ---------------------------- malloc_state ----------------------------- */
667

668
/* Bin types, widths and sizes */
669
#define NSMALLBINS                (32U)
670
#define NTREEBINS                (32U)
671
#define SMALLBIN_SHIFT                (3U)
672
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
673
#define TREEBIN_SHIFT                (8U)
674
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
675
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
676
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
677

678
struct malloc_state {
679
  binmap_t   smallmap;
680
  binmap_t   treemap;
681
  size_t     dvsize;
682
  size_t     topsize;
683
  mchunkptr  dv;
684
  mchunkptr  top;
685
  size_t     trim_check;
686
  size_t     release_checks;
687
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
688
  tbinptr    treebins[NTREEBINS];
689
  msegment   seg;
690
};
691

692
typedef struct malloc_state *mstate;
693

694
#define is_initialized(M)        ((M)->top != 0)
695

696
/* -------------------------- system alloc setup ------------------------- */
697

698
/* page-align a size */
699
#define page_align(S)\
700
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
701

702
/* granularity-align a size */
703
#define granularity_align(S)\
704
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
705
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
706

707
#if LJ_TARGET_WINDOWS
708
#define mmap_align(S)        granularity_align(S)
709
#else
710
#define mmap_align(S)        page_align(S)
711
#endif
712

713
/*  True if segment S holds address A */
714
#define segment_holds(S, A)\
715
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
716

717
/* Return segment holding given address */
718
static msegmentptr segment_holding(mstate m, char *addr)
76✔
719
{
720
  msegmentptr sp = &m->seg;
76✔
721
  for (;;) {
76✔
722
    if (addr >= sp->base && addr < sp->base + sp->size)
76✔
723
      return sp;
724
    if ((sp = sp->next) == 0)
×
725
      return 0;
726
  }
727
}
728

729
/* Return true if segment contains a segment link */
730
static int has_segment_link(mstate m, msegmentptr ss)
22✔
731
{
732
  msegmentptr sp = &m->seg;
22✔
733
  for (;;) {
66✔
734
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
66✔
735
      return 1;
736
    if ((sp = sp->next) == 0)
66✔
737
      return 0;
738
  }
739
}
740

741
/*
742
  TOP_FOOT_SIZE is padding at the end of a segment, including space
743
  that may be needed to place segment records and fenceposts when new
744
  noncontiguous segments are added.
745
*/
746
#define TOP_FOOT_SIZE\
747
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
748

749
/* ---------------------------- Indexing Bins ---------------------------- */
750

751
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
752
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
753
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
754
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
755

756
/* addressing by index. See above about smallbin repositioning */
757
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
758
#define treebin_at(M,i)                (&((M)->treebins[i]))
759

760
/* assign tree index for size S to variable I */
761
#define compute_tree_index(S, I)\
762
{\
763
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
764
  if (X == 0) {\
765
    I = 0;\
766
  } else if (X > 0xFFFF) {\
767
    I = NTREEBINS-1;\
768
  } else {\
769
    unsigned int K = lj_fls(X);\
770
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
771
  }\
772
}
773

774
/* Bit representing maximum resolved size in a treebin at i */
775
#define bit_for_tree_index(i) \
776
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
777

778
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
779
#define leftshift_for_tree_index(i) \
780
   ((i == NTREEBINS-1)? 0 : \
781
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
782

783
/* The size of the smallest chunk held in bin with index i */
784
#define minsize_for_tree_index(i) \
785
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
786
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
787

788
/* ------------------------ Operations on bin maps ----------------------- */
789

790
/* bit corresponding to given index */
791
#define idx2bit(i)                ((binmap_t)(1) << (i))
792

793
/* Mark/Clear bits with given index */
794
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
795
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
796
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
797

798
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
799
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
800
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
801

802
/* mask with all bits to left of least bit of x on */
803
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
804

805
/* Set cinuse bit and pinuse bit of next chunk */
806
#define set_inuse(M,p,s)\
807
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
808
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
809

810
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
811
#define set_inuse_and_pinuse(M,p,s)\
812
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
813
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
814

815
/* Set size, cinuse and pinuse bit of this chunk */
816
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
817
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
818

819
/* ----------------------- Operations on smallbins ----------------------- */
820

821
/* Link a free chunk into a smallbin  */
822
#define insert_small_chunk(M, P, S) {\
823
  bindex_t I = small_index(S);\
824
  mchunkptr B = smallbin_at(M, I);\
825
  mchunkptr F = B;\
826
  if (!smallmap_is_marked(M, I))\
827
    mark_smallmap(M, I);\
828
  else\
829
    F = B->fd;\
830
  B->fd = P;\
831
  F->bk = P;\
832
  P->fd = F;\
833
  P->bk = B;\
834
}
835

836
/* Unlink a chunk from a smallbin  */
837
#define unlink_small_chunk(M, P, S) {\
838
  mchunkptr F = P->fd;\
839
  mchunkptr B = P->bk;\
840
  bindex_t I = small_index(S);\
841
  if (F == B) {\
842
    clear_smallmap(M, I);\
843
  } else {\
844
    F->bk = B;\
845
    B->fd = F;\
846
  }\
847
}
848

849
/* Unlink the first chunk from a smallbin */
850
#define unlink_first_small_chunk(M, B, P, I) {\
851
  mchunkptr F = P->fd;\
852
  if (B == F) {\
853
    clear_smallmap(M, I);\
854
  } else {\
855
    B->fd = F;\
856
    F->bk = B;\
857
  }\
858
}
859

860
/* Replace dv node, binning the old one */
861
/* Used only when dvsize known to be small */
862
#define replace_dv(M, P, S) {\
863
  size_t DVS = M->dvsize;\
864
  if (DVS != 0) {\
865
    mchunkptr DV = M->dv;\
866
    insert_small_chunk(M, DV, DVS);\
867
  }\
868
  M->dvsize = S;\
869
  M->dv = P;\
870
}
871

872
/* ------------------------- Operations on trees ------------------------- */
873

874
/* Insert chunk into tree */
875
#define insert_large_chunk(M, X, S) {\
876
  tbinptr *H;\
877
  bindex_t I;\
878
  compute_tree_index(S, I);\
879
  H = treebin_at(M, I);\
880
  X->index = I;\
881
  X->child[0] = X->child[1] = 0;\
882
  if (!treemap_is_marked(M, I)) {\
883
    mark_treemap(M, I);\
884
    *H = X;\
885
    X->parent = (tchunkptr)H;\
886
    X->fd = X->bk = X;\
887
  } else {\
888
    tchunkptr T = *H;\
889
    size_t K = S << leftshift_for_tree_index(I);\
890
    for (;;) {\
891
      if (chunksize(T) != S) {\
892
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
893
        K <<= 1;\
894
        if (*C != 0) {\
895
          T = *C;\
896
        } else {\
897
          *C = X;\
898
          X->parent = T;\
899
          X->fd = X->bk = X;\
900
          break;\
901
        }\
902
      } else {\
903
        tchunkptr F = T->fd;\
904
        T->fd = F->bk = X;\
905
        X->fd = F;\
906
        X->bk = T;\
907
        X->parent = 0;\
908
        break;\
909
      }\
910
    }\
911
  }\
912
}
913

914
#define unlink_large_chunk(M, X) {\
915
  tchunkptr XP = X->parent;\
916
  tchunkptr R;\
917
  if (X->bk != X) {\
918
    tchunkptr F = X->fd;\
919
    R = X->bk;\
920
    F->bk = R;\
921
    R->fd = F;\
922
  } else {\
923
    tchunkptr *RP;\
924
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
925
        ((R = *(RP = &(X->child[0]))) != 0)) {\
926
      tchunkptr *CP;\
927
      while ((*(CP = &(R->child[1])) != 0) ||\
928
             (*(CP = &(R->child[0])) != 0)) {\
929
        R = *(RP = CP);\
930
      }\
931
      *RP = 0;\
932
    }\
933
  }\
934
  if (XP != 0) {\
935
    tbinptr *H = treebin_at(M, X->index);\
936
    if (X == *H) {\
937
      if ((*H = R) == 0) \
938
        clear_treemap(M, X->index);\
939
    } else {\
940
      if (XP->child[0] == X) \
941
        XP->child[0] = R;\
942
      else \
943
        XP->child[1] = R;\
944
    }\
945
    if (R != 0) {\
946
      tchunkptr C0, C1;\
947
      R->parent = XP;\
948
      if ((C0 = X->child[0]) != 0) {\
949
        R->child[0] = C0;\
950
        C0->parent = R;\
951
      }\
952
      if ((C1 = X->child[1]) != 0) {\
953
        R->child[1] = C1;\
954
        C1->parent = R;\
955
      }\
956
    }\
957
  }\
958
}
959

960
/* Relays to large vs small bin operations */
961

962
#define insert_chunk(M, P, S)\
963
  if (is_small(S)) { insert_small_chunk(M, P, S)\
964
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
965

966
#define unlink_chunk(M, P, S)\
967
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
968
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
969

970
/* -----------------------  Direct-mmapping chunks ----------------------- */
971

972
static void *direct_alloc(size_t nb)
2,996✔
973
{
974
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2,996✔
975
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
2,996✔
976
    char *mm = (char *)(DIRECT_MMAP(mmsize));
2,996✔
977
    if (mm != CMFAIL) {
2,996✔
978
      size_t offset = align_offset(chunk2mem(mm));
2,996✔
979
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
2,996✔
980
      mchunkptr p = (mchunkptr)(mm + offset);
2,996✔
981
      p->prev_foot = offset | IS_DIRECT_BIT;
2,996✔
982
      p->head = psize|CINUSE_BIT;
2,996✔
983
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2,996✔
984
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2,996✔
985
      return chunk2mem(p);
2,996✔
986
    }
987
  }
988
  return NULL;
989
}
990

991
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
125✔
992
{
993
  size_t oldsize = chunksize(oldp);
125✔
994
  if (is_small(nb)) /* Can't shrink direct regions below small size */
125✔
995
    return NULL;
996
  /* Keep old chunk if big enough but not too big */
997
  if (oldsize >= nb + SIZE_T_SIZE &&
122✔
998
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
61✔
999
    return oldp;
1000
  } else {
1001
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
91✔
1002
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
91✔
1003
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
91✔
1004
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
91✔
1005
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
1006
    if (cp != CMFAIL) {
91✔
1007
      mchunkptr newp = (mchunkptr)(cp + offset);
91✔
1008
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
91✔
1009
      newp->head = psize|CINUSE_BIT;
91✔
1010
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
91✔
1011
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
91✔
1012
      return newp;
91✔
1013
    }
1014
  }
1015
  return NULL;
1016
}
1017

1018
/* -------------------------- mspace management -------------------------- */
1019

1020
/* Initialize top chunk and its size */
1021
static void init_top(mstate m, mchunkptr p, size_t psize)
462✔
1022
{
1023
  /* Ensure alignment */
1024
  void *t = chunk2mem(p);
462✔
1025
#if LUAJIT_USE_ASAN
1026
  t -= REDZONE_SIZE;
1027
#endif
NEW
1028
  size_t offset = align_offset(t);
×
1029

1030
  p = (mchunkptr)((char *)p + offset);
462✔
1031
  psize -= offset;
462✔
1032

1033
  m->top = p;
462✔
1034
  m->topsize = psize;
462✔
1035
  p->head = psize | PINUSE_BIT;
462✔
1036
  /* set size of fake trailing chunk holding overhead space only once */
1037
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
462✔
1038
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
462✔
1039
}
82✔
1040

1041
/* Initialize bins for a new mstate that is otherwise zeroed out */
1042
static void init_bins(mstate m)
326✔
1043
{
1044
  /* Establish circular links for smallbins */
1045
  bindex_t i;
326✔
1046
  for (i = 0; i < NSMALLBINS; i++) {
10,758✔
1047
    sbinptr bin = smallbin_at(m,i);
10,432✔
1048
    bin->fd = bin->bk = bin;
10,432✔
1049
  }
1050
}
1051

1052
/* Allocate chunk and prepend remainder with chunk in successor base. */
1053
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
34,273✔
1054
{
1055
  mchunkptr p = align_as_chunk(newbase);
34,273✔
1056
  mchunkptr oldfirst = align_as_chunk(oldbase);
34,273✔
1057
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
34,273✔
1058
  mchunkptr q = chunk_plus_offset(p, nb);
34,273✔
1059
  size_t qsize = psize - nb;
34,273✔
1060
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
34,273✔
1061

1062
  /* consolidate remainder with first chunk of old base */
1063
  if (oldfirst == m->top) {
34,273✔
1064
    size_t tsize = m->topsize += qsize;
×
1065
    m->top = q;
×
1066
    q->head = tsize | PINUSE_BIT;
×
1067
  } else if (oldfirst == m->dv) {
34,273✔
1068
    size_t dsize = m->dvsize += qsize;
×
1069
    m->dv = q;
×
1070
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
1071
  } else {
1072
    if (!cinuse(oldfirst)) {
34,273✔
1073
      size_t nsize = chunksize(oldfirst);
10✔
1074
      unlink_chunk(m, oldfirst, nsize);
16✔
1075
      oldfirst = chunk_plus_offset(oldfirst, nsize);
10✔
1076
      qsize += nsize;
10✔
1077
    }
1078
    set_free_with_pinuse(q, qsize, oldfirst);
34,273✔
1079
    insert_chunk(m, q, qsize);
34,273✔
1080
  }
1081

1082
  return chunk2mem(p);
34,273✔
1083
}
1084

1085
/* Add a segment to hold a new noncontiguous region */
1086
static void add_segment(mstate m, char *tbase, size_t tsize)
54✔
1087
{
1088
  /* Determine locations and sizes of segment, fenceposts, old top */
1089
  char *old_top = (char *)m->top;
54✔
1090
  msegmentptr oldsp = segment_holding(m, old_top);
54✔
1091
#if LUAJIT_USE_ASAN
1092
  ASAN_UNPOISON_MEMORY_REGION(oldsp, sizeof(struct malloc_segment));
1093
#endif
1094
  char *old_end = oldsp->base + oldsp->size;
54✔
1095
  size_t ssize = pad_request(sizeof(struct malloc_segment));
54✔
1096
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
54✔
1097
  size_t offset = align_offset(chunk2mem(rawsp));
54✔
1098
  char *asp = rawsp + offset;
54✔
1099
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
54✔
1100
  mchunkptr sp = (mchunkptr)csp;
54✔
1101
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
54✔
1102
#if LUAJIT_USE_ASAN
1103
  ss = (msegmentptr)((void *)ss - REDZONE_SIZE);
1104
#endif
1105
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
54✔
1106
  mchunkptr p = tnext;
54✔
1107

1108
  /* reset top to new space */
1109
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
54✔
1110

1111
  /* Set up segment record */
1112
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
54✔
1113
  *ss = m->seg; /* Push current record */
54✔
1114
  m->seg.base = tbase;
54✔
1115
  m->seg.size = tsize;
54✔
1116
  m->seg.next = ss;
54✔
1117

1118
  /* Insert trailing fenceposts */
1119
  for (;;) {
192✔
1120
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
192✔
1121
    p->head = FENCEPOST_HEAD;
192✔
1122
    if ((char *)(&(nextp->head)) < old_end)
192✔
1123
      p = nextp;
1124
    else
1125
      break;
1126
  }
1127

1128
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1129
  if (csp != old_top) {
54✔
1130
    mchunkptr q = (mchunkptr)old_top;
40✔
1131
    size_t psize = (size_t)(csp - old_top);
40✔
1132
    mchunkptr tn = chunk_plus_offset(q, psize);
40✔
1133
    set_free_with_pinuse(q, psize, tn);
40✔
1134
    insert_chunk(m, q, psize);
51✔
1135
  }
1136
}
54✔
1137

1138
/* -------------------------- System allocation -------------------------- */
1139

1140
static void *alloc_sys(mstate m, size_t nb)
37,383✔
1141
{
1142
  char *tbase = CMFAIL;
37,383✔
1143
  size_t tsize = 0;
37,383✔
1144

1145
  /* Directly map large chunks */
1146
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
37,383✔
1147
    void *mem = direct_alloc(nb);
2,996✔
1148
    if (mem != 0)
2,996✔
1149
      return mem;
1150
  }
1151

1152
  {
1153
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
34,387✔
1154
    size_t rsize = granularity_align(req);
34,387✔
1155
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
34,387✔
1156
      char *mp = (char *)(CALL_MMAP(rsize));
34,387✔
1157
      if (mp != CMFAIL) {
34,387✔
1158
        tbase = mp;
34,387✔
1159
        tsize = rsize;
34,387✔
1160
      }
1161
    }
1162
  }
1163

1164
  if (tbase != CMFAIL) {
34,387✔
1165
    msegmentptr sp = &m->seg;
34,387✔
1166
    /* Try to merge with an existing segment */
1167
    while (sp != 0 && tbase != sp->base + sp->size)
281,222✔
1168
      sp = sp->next;
246,835✔
1169
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
34,387✔
1170
      sp->size += tsize;
60✔
1171
      init_top(m, m->top, m->topsize + tsize);
60✔
1172
    } else {
1173
      sp = &m->seg;
1174
      while (sp != 0 && sp->base != tbase + tsize)
46,949✔
1175
        sp = sp->next;
12,622✔
1176
      if (sp != 0) {
34,327✔
1177
        char *oldbase = sp->base;
34,273✔
1178
        sp->base = tbase;
34,273✔
1179
        sp->size += tsize;
34,273✔
1180
        return prepend_alloc(m, tbase, oldbase, nb);
34,273✔
1181
      } else {
1182
        add_segment(m, tbase, tsize);
54✔
1183
      }
1184
    }
1185

1186
    if (nb < m->topsize) { /* Allocate from new or extended top space */
114✔
1187
      size_t rsize = m->topsize -= nb;
114✔
1188
      mchunkptr p = m->top;
114✔
1189
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
114✔
1190
      r->head = rsize | PINUSE_BIT;
114✔
1191
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
114✔
1192
      return chunk2mem(p);
114✔
1193
    }
1194
  }
1195

1196
  return NULL;
1197
}
1198

1199
/* -----------------------  system deallocation -------------------------- */
1200

1201
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1202
static size_t release_unused_segments(mstate m)
541,891✔
1203
{
1204
  size_t released = 0;
541,891✔
1205
  size_t nsegs = 0;
541,891✔
1206
  msegmentptr pred = &m->seg;
541,891✔
1207
  msegmentptr sp = pred->next;
541,891✔
1208
  while (sp != 0) {
4,413,238✔
1209
    char *base = sp->base;
3,871,347✔
1210
    size_t size = sp->size;
3,871,347✔
1211
    msegmentptr next = sp->next;
3,871,347✔
1212
    nsegs++;
3,871,347✔
1213
    {
1214
      mchunkptr p = align_as_chunk(base);
3,871,347✔
1215
      size_t psize = chunksize(p);
3,871,347✔
1216
      /* Can unmap if first chunk holds entire segment and not pinned */
1217
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
3,871,347✔
1218
        tchunkptr tp = (tchunkptr)p;
15✔
1219
        if (p == m->dv) {
15✔
1220
          m->dv = 0;
×
1221
          m->dvsize = 0;
×
1222
        } else {
1223
          unlink_large_chunk(m, tp);
15✔
1224
        }
1225
        if (CALL_MUNMAP(base, size) == 0) {
15✔
1226
          released += size;
15✔
1227
          /* unlink obsoleted record */
1228
          sp = pred;
15✔
1229
          sp->next = next;
15✔
1230
        } else { /* back out if cannot unmap */
1231
          insert_large_chunk(m, tp, psize);
×
1232
        }
1233
      }
1234
    }
1235
    pred = sp;
1236
    sp = next;
1237
  }
1238
  /* Reset check counter */
1239
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
541,891✔
1240
                      nsegs : MAX_RELEASE_CHECK_RATE;
541,891✔
1241
  return released;
541,891✔
1242
}
1243

1244
static int alloc_trim(mstate m, size_t pad)
22✔
1245
{
1246
  size_t released = 0;
22✔
1247
  if (pad < MAX_REQUEST && is_initialized(m)) {
22✔
1248
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
22✔
1249

1250
    if (m->topsize > pad) {
22✔
1251
      /* Shrink top space in granularity-size units, keeping at least one */
1252
      size_t unit = DEFAULT_GRANULARITY;
22✔
1253
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
22✔
1254
                      SIZE_T_ONE) * unit;
1255
      msegmentptr sp = segment_holding(m, (char *)m->top);
22✔
1256

1257
      if (sp->size >= extra &&
22✔
1258
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
44✔
1259
        size_t newsize = sp->size - extra;
22✔
1260
        /* Prefer mremap, fall back to munmap */
1261
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
22✔
1262
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
×
1263
          released = extra;
22✔
1264
        }
1265
      }
1266

1267
      if (released != 0) {
22✔
1268
        sp->size -= released;
22✔
1269
        init_top(m, m->top, m->topsize - released);
22✔
1270
      }
1271
    }
1272

1273
    /* Unmap any unused mmapped segments */
1274
    released += release_unused_segments(m);
22✔
1275

1276
    /* On failure, disable autotrim to avoid repeated failed future calls */
1277
    if (released == 0 && m->topsize > m->trim_check)
22✔
1278
      m->trim_check = MAX_SIZE_T;
×
1279
  }
1280

1281
  return (released != 0)? 1 : 0;
22✔
1282
}
1283

1284
/* ---------------------------- malloc support --------------------------- */
1285

1286
/* allocate a large request from the best fitting chunk in a treebin */
1287
static void *tmalloc_large(mstate m, size_t nb)
284,664✔
1288
{
1289
  tchunkptr v = 0;
284,664✔
1290
  size_t rsize = ~nb+1; /* Unsigned negation */
284,664✔
1291
  tchunkptr t;
284,664✔
1292
  bindex_t idx;
284,664✔
1293
  compute_tree_index(nb, idx);
284,664✔
1294

1295
  if ((t = *treebin_at(m, idx)) != 0) {
284,664✔
1296
    /* Traverse tree for this bin looking for node with size == nb */
1297
    size_t sizebits = nb << leftshift_for_tree_index(idx);
123,246✔
1298
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
123,246✔
1299
    for (;;) {
261,422✔
1300
      tchunkptr rt;
192,334✔
1301
      size_t trem = chunksize(t) - nb;
192,334✔
1302
      if (trem < rsize) {
192,334✔
1303
        v = t;
104,498✔
1304
        if ((rsize = trem) == 0)
104,498✔
1305
          break;
1306
      }
1307
      rt = t->child[1];
169,847✔
1308
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
169,847✔
1309
      if (rt != 0 && rt != t)
169,847✔
1310
        rst = rt;
55,433✔
1311
      if (t == 0) {
169,847✔
1312
        t = rst; /* set t to least subtree holding sizes > nb */
1313
        break;
1314
      }
1315
      sizebits <<= 1;
69,088✔
1316
    }
1317
  }
1318

1319
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
284,664✔
1320
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
219,802✔
1321
    if (leftbits != 0)
219,802✔
1322
      t = *treebin_at(m, lj_ffs(leftbits));
209,694✔
1323
  }
1324

1325
  while (t != 0) { /* find smallest of tree or subtree */
649,772✔
1326
    size_t trem = chunksize(t) - nb;
365,108✔
1327
    if (trem < rsize) {
365,108✔
1328
      rsize = trem;
282,596✔
1329
      v = t;
282,596✔
1330
    }
1331
    t = leftmost_child(t);
365,108✔
1332
  }
1333

1334
  /*  If dv is a better fit, return NULL so malloc will use it */
1335
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
284,664✔
1336
    mchunkptr r = chunk_plus_offset(v, nb);
234,552✔
1337
    unlink_large_chunk(m, v);
248,900✔
1338
    if (rsize < MIN_CHUNK_SIZE) {
234,552✔
1339
      set_inuse_and_pinuse(m, v, (rsize + nb));
35,275✔
1340
    } else {
1341
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
199,277✔
1342
      set_size_and_pinuse_of_free_chunk(r, rsize);
199,277✔
1343
      insert_chunk(m, r, rsize);
227,618✔
1344
    }
1345
    return chunk2mem(v);
234,552✔
1346
  }
1347
  return NULL;
1348
}
1349

1350
/* allocate a small request from the best fitting chunk in a treebin */
1351
static void *tmalloc_small(mstate m, size_t nb)
167,223✔
1352
{
1353
  tchunkptr t, v;
167,223✔
1354
  mchunkptr r;
167,223✔
1355
  size_t rsize;
167,223✔
1356
  bindex_t i = lj_ffs(m->treemap);
167,223✔
1357

1358
  v = t = *treebin_at(m, i);
167,223✔
1359
  rsize = chunksize(t) - nb;
167,223✔
1360

1361
  while ((t = leftmost_child(t)) != 0) {
358,756✔
1362
    size_t trem = chunksize(t) - nb;
191,533✔
1363
    if (trem < rsize) {
191,533✔
1364
      rsize = trem;
98,180✔
1365
      v = t;
98,180✔
1366
    }
1367
  }
1368

1369
  r = chunk_plus_offset(v, nb);
167,223✔
1370
  unlink_large_chunk(m, v);
201,920✔
1371
  if (rsize < MIN_CHUNK_SIZE) {
167,223✔
1372
    set_inuse_and_pinuse(m, v, (rsize + nb));
17✔
1373
  } else {
1374
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
167,206✔
1375
    set_size_and_pinuse_of_free_chunk(r, rsize);
167,206✔
1376
    replace_dv(m, r, rsize);
167,206✔
1377
  }
1378
  return chunk2mem(v);
167,223✔
1379
}
1380

1381
/* ----------------------------------------------------------------------- */
1382

1383
void *lj_alloc_create(void)
326✔
1384
{
1385
  size_t tsize = DEFAULT_GRANULARITY;
326✔
1386
#if LUAJIT_USE_ASAN
1387
  tsize -= TOTAL_REDZONE_SIZE;
1388
#endif
1389
  char *tbase;
326✔
1390
  INIT_MMAP();
326✔
1391
  tbase = (char *)(CALL_MMAP(tsize));
326✔
1392
  if (tbase != CMFAIL) {
326✔
1393
    size_t msize = pad_request(sizeof(struct malloc_state));
326✔
1394
    mchunkptr mn;
326✔
1395
#if LUAJIT_USE_ASAN
1396
    mchunkptr msp = (mchunkptr)(tbase + align_offset(chunk2mem(tbase) - REDZONE_SIZE));
1397
    mstate m = (mstate)(chunk2mem(msp) - REDZONE_SIZE);
1398
#else
1399
    mchunkptr msp = align_as_chunk(tbase);
326✔
1400
    mstate m = (mstate)(chunk2mem(msp));
326✔
1401
#endif
1402
    memset(m, 0, msize);
326✔
1403
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
326✔
1404
    m->seg.base = tbase;
326✔
1405
    m->seg.size = tsize;
326✔
1406
    m->release_checks = MAX_RELEASE_CHECK_RATE;
326✔
1407
    init_bins(m);
326✔
1408
#if LUAJIT_USE_ASAN
1409
    mn = next_chunk((mchunkptr)((char *)(m) - TWO_SIZE_T_SIZES));
1410
#else
1411
    mn = next_chunk(mem2chunk(m));
326✔
1412
#endif
1413
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
326✔
1414
    return m;
326✔
1415
  }
1416
  return NULL;
1417
}
1418

1419
void lj_alloc_destroy(void *msp)
316✔
1420
{
1421
  mstate ms = (mstate)msp;
316✔
1422
  msegmentptr sp = &ms->seg;
316✔
1423
  while (sp != 0) {
671✔
1424
    char *base = sp->base;
355✔
1425
    size_t size = sp->size;
355✔
1426
    sp = sp->next;
355✔
1427
#if LUAJIT_USE_ASAN
1428
    ASAN_UNPOISON_MEMORY_REGION(base, size);
1429
#endif
1430
    CALL_MUNMAP(base, size);
355✔
1431
  }
1432
}
316✔
1433

1434
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
143,916,596✔
1435
{
1436
#if LUAJIT_USE_ASAN
1437
  size_t mem_size = nsize;
1438
  size_t poison_size = (size_t)align_up((void *)nsize, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
1439
  nsize = poison_size;
1440
#endif
1441
  mstate ms = (mstate)msp;
143,916,596✔
1442
  void *mem;
143,916,596✔
1443
  size_t nb;
143,916,596✔
1444
  if (nsize <= MAX_SMALL_REQUEST) {
143,916,596✔
1445
    bindex_t idx;
143,595,508✔
1446
    binmap_t smallbits;
143,595,508✔
1447
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
143,595,508✔
1448
    idx = small_index(nb);
143,595,508✔
1449
    smallbits = ms->smallmap >> idx;
143,595,508✔
1450

1451
    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
143,595,508✔
1452
      mchunkptr b, p;
419,882✔
1453
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
419,882✔
1454
      b = smallbin_at(ms, idx);
419,882✔
1455
      p = b->fd;
419,882✔
1456
      unlink_first_small_chunk(ms, b, p, idx);
419,882✔
1457
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
419,882✔
1458
      mem = chunk2mem(p);
419,882✔
1459
#if LUAJIT_USE_ASAN
1460
      mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
1461
#endif
1462
      return mem;
419,882✔
1463
    } else if (nb > ms->dvsize) {
143,175,626✔
1464
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
721,676✔
1465
        mchunkptr b, p, r;
132,794✔
1466
        size_t rsize;
132,794✔
1467
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
132,794✔
1468
        bindex_t i = lj_ffs(leftbits);
132,794✔
1469
        b = smallbin_at(ms, i);
132,794✔
1470
        p = b->fd;
132,794✔
1471
        unlink_first_small_chunk(ms, b, p, i);
132,794✔
1472
        rsize = small_index2size(i) - nb;
132,794✔
1473
        /* Fit here cannot be remainderless if 4byte sizes */
1474
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
132,794✔
1475
          set_inuse_and_pinuse(ms, p, small_index2size(i));
25,863✔
1476
        } else {
1477
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
106,931✔
1478
          r = chunk_plus_offset(p, nb);
106,931✔
1479
          set_size_and_pinuse_of_free_chunk(r, rsize);
106,931✔
1480
          replace_dv(ms, r, rsize);
106,931✔
1481
        }
1482
        mem = chunk2mem(p);
132,794✔
1483
#if LUAJIT_USE_ASAN
1484
  mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
1485
#endif
1486
        return mem;
132,794✔
1487
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
588,882✔
1488
#if LUAJIT_USE_ASAN
1489
  mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
1490
#endif
1491
        return mem;
1492
      }
1493
    }
1494
  } else if (nsize >= MAX_REQUEST) {
321,088✔
1495
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
1496
  } else {
1497
    nb = pad_request(nsize);
321,088✔
1498
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
321,088✔
1499
#if LUAJIT_USE_ASAN
1500
      mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
1501
#endif
1502
      return mem;
1503
    }
1504
  }
1505

1506
  if (nb <= ms->dvsize) {
142,962,145✔
1507
    size_t rsize = ms->dvsize - nb;
142,513,917✔
1508
    mchunkptr p = ms->dv;
142,513,917✔
1509
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
142,513,917✔
1510
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
142,350,905✔
1511
      ms->dvsize = rsize;
142,350,905✔
1512
      set_size_and_pinuse_of_free_chunk(r, rsize);
142,350,905✔
1513
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
142,350,905✔
1514
    } else { /* exhaust dv */
1515
      size_t dvs = ms->dvsize;
163,012✔
1516
      ms->dvsize = 0;
163,012✔
1517
      ms->dv = 0;
163,012✔
1518
      set_inuse_and_pinuse(ms, p, dvs);
163,012✔
1519
    }
1520
    mem = chunk2mem(p);
142,513,917✔
1521
#if LUAJIT_USE_ASAN
1522
    mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
1523
#endif
1524
    return mem;
142,513,917✔
1525
  } else if (nb < ms->topsize) { /* Split top */
448,228✔
1526
    size_t rsize = ms->topsize -= nb;
410,845✔
1527
    mchunkptr p = ms->top;
410,845✔
1528
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
410,845✔
1529
    r->head = rsize | PINUSE_BIT;
410,845✔
1530
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
410,845✔
1531
    mem = chunk2mem(p);
410,845✔
1532
#if LUAJIT_USE_ASAN
1533
    mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
1534
#endif
1535
    return mem;
410,845✔
1536
  }
1537
#if LUAJIT_USE_ASAN
1538
  return mark_memory_region(alloc_sys(ms, nb) - REDZONE_SIZE, mem_size, poison_size);
1539
#else
1540
  return alloc_sys(ms, nb);
37,383✔
1541
#endif
1542
}
1543

1544
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
143,937,455✔
1545
{
1546
#if LUAJIT_USE_ASAN
1547
  if (ptr != 0) {    
1548
    size_t mem_size = asan_get_size(ptr, MEM_SIZE);
1549
    size_t poison_size = asan_get_size(ptr, POISON_SIZE);
1550

1551
    memmove(ptr, ptr, mem_size);
1552
    ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE, poison_size);
1553
  }
1554
  return NULL;
1555
#else
1556
  if (ptr != 0) {
143,937,455✔
1557
    mchunkptr p = mem2chunk(ptr);
143,908,875✔
1558
    mstate fm = (mstate)msp;
143,908,875✔
1559
    size_t psize = chunksize(p);
143,908,875✔
1560
    mchunkptr next = chunk_plus_offset(p, psize);
143,908,875✔
1561
    if (!pinuse(p)) {
143,908,875✔
1562
      size_t prevsize = p->prev_foot;
2,650,188✔
1563
      if ((prevsize & IS_DIRECT_BIT) != 0) {
2,650,188✔
1564
        prevsize &= ~IS_DIRECT_BIT;
2,996✔
1565
        psize += prevsize + DIRECT_FOOT_PAD;
2,996✔
1566
        CALL_MUNMAP((char *)p - prevsize, psize);
2,996✔
1567
        return NULL;
2,996✔
1568
      } else {
1569
        mchunkptr prev = chunk_minus_offset(p, prevsize);
2,647,192✔
1570
        psize += prevsize;
2,647,192✔
1571
        p = prev;
2,647,192✔
1572
        /* consolidate backward */
1573
        if (p != fm->dv) {
2,647,192✔
1574
          unlink_chunk(fm, p, prevsize);
2,570,749✔
1575
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
146,563✔
1576
          fm->dvsize = psize;
3,732✔
1577
          set_free_with_pinuse(p, psize, next);
3,732✔
1578
          return NULL;
3,732✔
1579
        }
1580
      }
1581
    }
1582
    if (!cinuse(next)) {  /* consolidate forward */
143,902,147✔
1583
      if (next == fm->top) {
140,648,726✔
1584
        size_t tsize = fm->topsize += psize;
31,058✔
1585
        fm->top = p;
31,058✔
1586
        p->head = tsize | PINUSE_BIT;
31,058✔
1587
        if (p == fm->dv) {
31,058✔
1588
          fm->dv = 0;
227✔
1589
          fm->dvsize = 0;
227✔
1590
        }
1591
        if (tsize > fm->trim_check)
31,058✔
1592
          alloc_trim(fm, 0);
22✔
1593
        return NULL;
31,058✔
1594
      } else if (next == fm->dv) {
140,617,668✔
1595
        size_t dsize = fm->dvsize += psize;
613,036✔
1596
        fm->dv = p;
613,036✔
1597
        set_size_and_pinuse_of_free_chunk(p, dsize);
613,036✔
1598
        return NULL;
613,036✔
1599
      } else {
1600
        size_t nsize = chunksize(next);
140,004,632✔
1601
        psize += nsize;
140,004,632✔
1602
        unlink_chunk(fm, next, nsize);
140,075,658✔
1603
        set_size_and_pinuse_of_free_chunk(p, psize);
140,004,632✔
1604
        if (p == fm->dv) {
140,004,632✔
1605
          fm->dvsize = psize;
142,604✔
1606
          return NULL;
142,604✔
1607
        }
1608
      }
1609
    } else {
1610
      set_free_with_pinuse(p, psize, next);
3,253,421✔
1611
    }
1612

1613
    if (is_small(psize)) {
143,115,449✔
1614
      insert_small_chunk(fm, p, psize);
4,887,342✔
1615
    } else {
1616
      tchunkptr tp = (tchunkptr)p;
138,228,107✔
1617
      insert_large_chunk(fm, tp, psize);
142,879,997✔
1618
      if (--fm->release_checks == 0)
138,228,107✔
1619
        release_unused_segments(fm);
541,869✔
1620
    }
1621
  }
1622
  return NULL;
1623
#endif
1624
}
1625

1626
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
122,144✔
1627
{
1628
#if LUAJIT_USE_ASAN
1629
  if (nsize >= MAX_REQUEST)
1630
    return NULL;
1631

1632
  mstate m = (mstate)msp;
1633

1634
  size_t mem_size = asan_get_size(ptr, MEM_SIZE);
1635
  size_t poison_size = asan_get_size(ptr, POISON_SIZE);
1636

1637
  void *newmem = lj_alloc_malloc(m, nsize);
1638

1639
  if (newmem == NULL)
1640
    return NULL;
1641

1642
  memcpy(newmem, ptr, nsize > mem_size ? mem_size : nsize);
1643
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE, poison_size);
1644
  return newmem;
1645
#else
1646
  if (nsize >= MAX_REQUEST) {
122,144✔
1647
    return NULL;
1648
  } else {
1649
    mstate m = (mstate)msp;
122,144✔
1650
    mchunkptr oldp = mem2chunk(ptr);
122,144✔
1651
    size_t oldsize = chunksize(oldp);
122,144✔
1652
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
122,144✔
1653
    mchunkptr newp = 0;
122,144✔
1654
    size_t nb = request2size(nsize);
122,144✔
1655

1656
    /* Try to either shrink or extend into top. Else malloc-copy-free */
1657
    if (is_direct(oldp)) {
122,144✔
1658
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
125✔
1659
    } else if (oldsize >= nb) { /* already big enough */
122,019✔
1660
      size_t rsize = oldsize - nb;
659✔
1661
      newp = oldp;
659✔
1662
      if (rsize >= MIN_CHUNK_SIZE) {
659✔
1663
        mchunkptr rem = chunk_plus_offset(newp, nb);
650✔
1664
        set_inuse(m, newp, nb);
650✔
1665
        set_inuse(m, rem, rsize);
650✔
1666
        lj_alloc_free(m, chunk2mem(rem));
650✔
1667
      }
1668
    } else if (next == m->top && oldsize + m->topsize > nb) {
121,360✔
1669
      /* Expand into top */
1670
      size_t newsize = oldsize + m->topsize;
505✔
1671
      size_t newtopsize = newsize - nb;
505✔
1672
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
505✔
1673
      set_inuse(m, oldp, nb);
505✔
1674
      newtop->head = newtopsize |PINUSE_BIT;
505✔
1675
      m->top = newtop;
505✔
1676
      m->topsize = newtopsize;
505✔
1677
      newp = oldp;
505✔
1678
    }
1679

1680
    if (newp != 0) {
1,289✔
1681
      return chunk2mem(newp);
1,286✔
1682
    } else {
1683
      void *newmem = lj_alloc_malloc(m, nsize);
120,858✔
1684
      if (newmem != 0) {
120,858✔
1685
        size_t oc = oldsize - overhead_for(oldp);
120,858✔
1686
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
120,858✔
1687
        lj_alloc_free(m, ptr);
120,858✔
1688
      }
1689
      return newmem;
120,858✔
1690
    }
1691
  }
1692
#endif
1693
}
1694

1695
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
287,733,829✔
1696
{
1697
  (void)osize;
287,733,829✔
1698
  if (nsize == 0) {
287,733,829✔
1699
    return lj_alloc_free(msp, ptr);
143,815,947✔
1700
  } else if (ptr == NULL) {
143,917,882✔
1701
    return lj_alloc_malloc(msp, nsize);
143,795,738✔
1702
  } else {
1703
    return lj_alloc_realloc(msp, ptr, nsize);
122,144✔
1704
  }
1705
}
1706

1707
#endif
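/*
** Usage sketch (illustrative only, not part of lj_alloc.c): the entry points above follow
** the lua_Alloc-style protocol, so a hypothetical caller would look roughly like:
**
**   void *msp = lj_alloc_create();
**   if (msp) {
**     void *p = lj_alloc_f(msp, NULL, 0, 64);    (nsize != 0, ptr == NULL: malloc 64 bytes)
**     p = lj_alloc_f(msp, p, 64, 128);           (ptr != NULL, nsize != 0: realloc to 128)
**     lj_alloc_f(msp, p, 128, 0);                (nsize == 0: free)
**     lj_alloc_destroy(msp);
**   }
*/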