
tarantool / luajit / 9581351379

19 Jun 2024 11:40AM UTC coverage: 92.652% (+0.06% from 92.591%)

Build 9581351379: push via github, mandesero, "fix alloc"

5661 of 6018 branches covered (94.07%); branch coverage is included in the aggregate %.
21612 of 23418 relevant lines covered (92.29%)
1502850.63 hits per line

Source File

/src/lj_alloc.c: 91.13% covered
1
/*
2
** Bundled memory allocator.
3
**
4
** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
5
** The original bears the following remark:
6
**
7
**   This is a version (aka dlmalloc) of malloc/free/realloc written by
8
**   Doug Lea and released to the public domain, as explained at
9
**   http://creativecommons.org/licenses/publicdomain.
10
**
11
**   * Version pre-2.8.4 Wed Mar 29 19:46:29 2006    (dl at gee)
12
**
13
** No additional copyright is claimed over the customizations.
14
** Please do NOT bother the original author about this version here!
15
**
16
** If you want to use dlmalloc in another project, you should get
17
** the original from: ftp://gee.cs.oswego.edu/pub/misc/
18
** For thread-safe derivatives, take a look at:
19
** - ptmalloc: http://www.malloc.de/
20
** - nedmalloc: http://www.nedprod.com/programs/portable/nedmalloc/
21
*/
22

23
#define lj_alloc_c
24
#define LUA_CORE
25

26
/* To get the mremap prototype. Must be defined before any system includes. */
27
#if defined(__linux__) && !defined(_GNU_SOURCE)
28
#define _GNU_SOURCE
29
#endif
30

31
#include "lj_def.h"
32
#include "lj_arch.h"
33
#include "lj_alloc.h"
34

35
#ifndef LUAJIT_USE_SYSMALLOC
36

37
#define MAX_SIZE_T                (~(size_t)0)
38
#define MALLOC_ALIGNMENT        ((size_t)8U)
39

40
#define DEFAULT_GRANULARITY        ((size_t)128U * (size_t)1024U)
41
#define DEFAULT_TRIM_THRESHOLD        ((size_t)2U * (size_t)1024U * (size_t)1024U)
42
#define DEFAULT_MMAP_THRESHOLD        ((size_t)128U * (size_t)1024U)
43
#define MAX_RELEASE_CHECK_RATE        255
44

45
/* ------------------- size_t and alignment properties -------------------- */
46

47
/* The byte and bit size of a size_t */
48
#define SIZE_T_SIZE                (sizeof(size_t))
49
#define SIZE_T_BITSIZE                (sizeof(size_t) << 3)
50

51
/* Some constants coerced to size_t */
52
/* Annoying but necessary to avoid errors on some platforms */
53
#define SIZE_T_ZERO                ((size_t)0)
54
#define SIZE_T_ONE                ((size_t)1)
55
#define SIZE_T_TWO                ((size_t)2)
56
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
57
#define FOUR_SIZE_T_SIZES        (SIZE_T_SIZE<<2)
58
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
59

60
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
61
#define CHUNK_ALIGN_MASK        (MALLOC_ALIGNMENT - SIZE_T_ONE)
62

63
/* the number of bytes to offset an address to align it */
64
#define align_offset(A)\
65
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
66
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
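/* Worked example (with MALLOC_ALIGNMENT == 8 as defined above):
** align_offset(0x1005) == 3, since 0x1005 & 7 == 5 and (8 - 5) & 7 == 3;
** align_offset(0x1008) == 0, since the address is already 8-byte aligned.
*/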
67

68
/* -------------------------- MMAP support ------------------------------- */
69

70
#define MFAIL                        ((void *)(MAX_SIZE_T))
71
#define CMFAIL                        ((char *)(MFAIL)) /* defined for convenience */
72

73
#define IS_DIRECT_BIT                (SIZE_T_ONE)
74

75

76
/* Determine system-specific block allocation method. */
77
#if LJ_TARGET_WINDOWS
78

79
#define WIN32_LEAN_AND_MEAN
80
#include <windows.h>
81

82
#define LJ_ALLOC_VIRTUALALLOC        1
83

84
#if LJ_64 && !LJ_GC64
85
#define LJ_ALLOC_NTAVM                1
86
#endif
87

88
#else
89

90
#include <errno.h>
91
/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
92
#include <sys/mman.h>
93

94
#define LJ_ALLOC_MMAP                1
95

96
#if LJ_64
97

98
#define LJ_ALLOC_MMAP_PROBE        1
99

100
#if LJ_GC64
101
#define LJ_ALLOC_MBITS                47        /* 128 TB in LJ_GC64 mode. */
102
#elif LJ_TARGET_X64 && LJ_HASJIT
103
/* Due to limitations in the x64 compiler backend. */
104
#define LJ_ALLOC_MBITS                31        /* 2 GB on x64 with !LJ_GC64. */
105
#else
106
#define LJ_ALLOC_MBITS                32        /* 4 GB on other archs with !LJ_GC64. */
107
#endif
108

109
#endif
110

111
#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
112
#define LJ_ALLOC_MMAP32                1
113
#endif
114

115
#if LJ_TARGET_LINUX
116
#define LJ_ALLOC_MREMAP                1
117
#endif
118

119
#endif
120

121

122
#if LJ_ALLOC_VIRTUALALLOC
123

124
#if LJ_ALLOC_NTAVM
125
/* Undocumented, but hey, that's what we all love so much about Windows. */
126
typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG zbits,
127
                       size_t *size, ULONG alloctype, ULONG prot);
128
static PNTAVM ntavm;
129

130
/* Number of top bits of the lower 32 bits of an address that must be zero.
131
** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
132
*/
133
#define NTAVM_ZEROBITS                1
134

135
static void init_mmap(void)
136
{
137
  ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
138
                                 "NtAllocateVirtualMemory");
139
}
140
#define INIT_MMAP()        init_mmap()
141

142
/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
143
static void *CALL_MMAP(size_t size)
144
{
145
  DWORD olderr = GetLastError();
146
  void *ptr = NULL;
147
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
148
                  MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
149
  SetLastError(olderr);
150
  return st == 0 ? ptr : MFAIL;
151
}
152

153
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
154
static void *DIRECT_MMAP(size_t size)
155
{
156
  DWORD olderr = GetLastError();
157
  void *ptr = NULL;
158
  long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
159
                  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
160
  SetLastError(olderr);
161
  return st == 0 ? ptr : MFAIL;
162
}
163

164
#else
165

166
/* Win32 MMAP via VirtualAlloc */
167
static void *CALL_MMAP(size_t size)
168
{
169
  DWORD olderr = GetLastError();
170
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
171
  SetLastError(olderr);
172
  return ptr ? ptr : MFAIL;
173
}
174

175
/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
176
static void *DIRECT_MMAP(size_t size)
177
{
178
  DWORD olderr = GetLastError();
179
  void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
180
                            PAGE_READWRITE);
181
  SetLastError(olderr);
182
  return ptr ? ptr : MFAIL;
183
}
184

185
#endif
186

187
/* This function supports releasing coalesced segments */
188
static int CALL_MUNMAP(void *ptr, size_t size)
189
{
190
  DWORD olderr = GetLastError();
191
  MEMORY_BASIC_INFORMATION minfo;
192
  char *cptr = (char *)ptr;
193
  while (size) {
194
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
195
      return -1;
196
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
197
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
198
      return -1;
199
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
200
      return -1;
201
    cptr += minfo.RegionSize;
202
    size -= minfo.RegionSize;
203
  }
204
  SetLastError(olderr);
205
  return 0;
206
}
207

208
#elif LJ_ALLOC_MMAP
209

210
#define MMAP_PROT                (PROT_READ|PROT_WRITE)
211
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
212
#define MAP_ANONYMOUS                MAP_ANON
213
#endif
214
#define MMAP_FLAGS                (MAP_PRIVATE|MAP_ANONYMOUS)
215

216
#if LJ_ALLOC_MMAP_PROBE
217

218
#ifdef MAP_TRYFIXED
219
#define MMAP_FLAGS_PROBE        (MMAP_FLAGS|MAP_TRYFIXED)
220
#else
221
#define MMAP_FLAGS_PROBE        MMAP_FLAGS
222
#endif
223

224
#define LJ_ALLOC_MMAP_PROBE_MAX                30
225
#define LJ_ALLOC_MMAP_PROBE_LINEAR        5
226

227
/**
 * Typical 64-bit AddressSanitizer shadow memory layout (cf. the ASan
 * documentation); the `<--` marks HighMem, where this allocator places
 * its mappings when built with ASan:
228
 * || `[0x10007fff8000, 0x7fffffffffff]` || HighMem    || <--
229
 * || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow ||
230
 * || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap  ||
231
 * || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow  ||
232
 * || `[0x000000000000, 0x00007fff7fff]` || LowMem     ||
233
 */
234

235
#define LJ_ALLOC_MMAP_PROBE_LOWER        ((uintptr_t)0x4000)
236

237
/* No point in a giant ifdef mess. Just try to open /dev/urandom.
238
** It doesn't really matter if this fails, since we get some ASLR bits from
239
** every unsuitable allocation, too. And we prefer linear allocation, anyway.
240
*/
241
#include <fcntl.h>
242
#include <unistd.h>
243

244
#if LUAJIT_USE_ASAN
245

246
/*
247
** ASan (AddressSanitizer) detects memory errors during program execution.
248
** One way to achieve this is by adding redzones around memory allocations. The redzone is a
249
** specially allocated area of memory before and after the allocated block, which is filled
250
** with a unique value. If the program tries to access memory outside of the allocation,
251
** ASan detects the access and reports an error, allowing the developer to
252
** find and fix the issue early.
253
**
254
** - Original paper: https://www.usenix.org/system/files/conference/atc12/atc12-final39.pdf
255
**
256
** LuaJIT ASAN instrumentation (mmap and others):
257
**
258
** - Memory map around allocation:
259
** -------------------------------------------------------------------------------------
260
** .. .. | [f7]    ...    [f7] | [00]     ...     [0(0-7)] | [f7]    ...    [f7] | .. ..
261
**       |    left redzone     |           data            |    right redzone    |
262
**       |  REDZONE_SIZE bytes |          N bytes          |  REDZONE_SIZE bytes |
263
** ------------------------------------------------------------------------------------- 
264
**
265
** left redzone: 
266
**  The first SIZE_T_SIZE bytes of the redzone contain the data size N, the next SIZE_T_SIZE bytes 
267
**  of the redzone contain the full size of the allocation, including the alignment of the size N 
268
**  and the size of the redzones themselves.
269
*/
270

271
#include <sanitizer/asan_interface.h>
272

273
/* Recommended redzone size is 16 to 2048 bytes (must be a power of two);
274
** https://github.com/google/sanitizers/wiki/AddressSanitizerFlags
275
*/
276
#define REDZONE_SIZE FOUR_SIZE_T_SIZES
277

278
/* Total redzone size around allocation */
279
#define TOTAL_REDZONE_SIZE (REDZONE_SIZE << 1)
280

281
/* Multiple of the allocated memory size */
282
#define SIZE_ALIGNMENT MALLOC_ALIGNMENT
283

284
/* Multiple of the allocated memory address */
285
#define ADDR_ALIGNMENT MALLOC_ALIGNMENT
286

287
static inline uintptr_t asan_lower_address()
288
{
289
#if !LJ_GC64
290
  return (uintptr_t)(0x40000000);
291
#endif
292
  size_t shadow_scale;
293
  size_t shadow_offset;
294
  __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
295
  return (uintptr_t)(shadow_offset + (1ULL << (LJ_ALLOC_MBITS - shadow_scale)));
296
}
297

298
/* Casting to the nearest multiple of alignment from above */
299
#define align_up_ptr(ptr, alignment)  ((void *)(((uintptr_t)(ptr) + (alignment) - 1) & ~((alignment) - 1)))
300
#define align_up_size(ptr, alignment)  ((size_t)(align_up_ptr(ptr, alignment)))
301

302
void *mark_memory_region(void *ptr, size_t mem_size, size_t poison_size)
303
{
304
  if (ptr == NULL)
305
    return NULL;
306
  size_t *sptr = (size_t *)ptr;
307
  ASAN_UNPOISON_MEMORY_REGION(ptr, TWO_SIZE_T_SIZES);
308
  sptr[0] = mem_size;
309
  sptr[1] = poison_size;
310
  ASAN_POISON_MEMORY_REGION(ptr, poison_size);
311
  ptr += REDZONE_SIZE;
312
  ASAN_UNPOISON_MEMORY_REGION(ptr, mem_size);
313
  return ptr;
314
}
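/* Usage sketch (mirrors what mmap_probe() below does under ASan): the caller
** over-allocates by TOTAL_REDZONE_SIZE and passes the raw mapping here:
**
**   size_t mem_size = size;
**   size = align_up_size(size + TOTAL_REDZONE_SIZE, DEFAULT_GRANULARITY);
**   void *p = mmap(..., size, ...);
**   p = mark_memory_region(p, mem_size, size);
**
** The returned pointer is advanced past the left redzone and refers to
** mem_size unpoisoned bytes; both sizes are stashed in the left redzone so
** asan_get_size() can recover them later (e.g. in CALL_MUNMAP/CALL_MREMAP_).
*/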
315

316
typedef enum {
317
  MEM_SIZE,
318
  POISON_SIZE
319
} SizeType;
320

321
size_t asan_get_size(void *ptr, SizeType type)
322
{
323
  size_t offset = (type == MEM_SIZE) ? 0 : SIZE_T_SIZE;
324
  ASAN_UNPOISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
325
  size_t size = *((size_t *)(ptr - REDZONE_SIZE + offset));
326
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE + offset, SIZE_T_SIZE);
327
  return size;
328
}
329

330
#endif
331

332
static uintptr_t mmap_probe_seed(void)
×
333
{
334
  uintptr_t val;
×
335
  int fd = open("/dev/urandom", O_RDONLY);
×
336
  if (fd != -1) {
×
337
    int ok = ((size_t)read(fd, &val, sizeof(val)) == sizeof(val));
×
338
    (void)close(fd);
×
339
    if (ok) return val;
×
340
  }
341
  return 1;  /* Punt. */
342
}
343

344
static void *mmap_probe(size_t size)
21,233✔
345
{
346
  /* Hint for next allocation. Doesn't need to be thread-safe. */
347
  static uintptr_t hint_addr = 0;
21,233✔
348
  static uintptr_t hint_prng = 0;
21,233✔
349
  int olderr = errno;
21,233✔
350
  int retry;
21,233✔
351
#if LUAJIT_USE_ASAN
352
  size_t mem_size = size;
353
  size = align_up_size(size + TOTAL_REDZONE_SIZE, DEFAULT_GRANULARITY);
354
#endif
355
  for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
21,233✔
356
    void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
21,233✔
357
    uintptr_t addr = (uintptr_t)p;
21,233✔
358
#if LUAJIT_USE_ASAN
359
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= asan_lower_address() &&
360
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
361
#else
362
    if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
21,233✔
363
        ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
21,233✔
364
#endif
365
      /* We got a suitable address. Bump the hint address. */
366
      hint_addr = addr + size;
21,233✔
367
      errno = olderr;
21,233✔
368
#if LUAJIT_USE_ASAN
369
      p = mark_memory_region(p, mem_size, size);
370
#endif
371
      return p;
21,233✔
372
    }
373
    if (p != MFAIL) {
×
374
      munmap(p, size);
×
375
    } else if (errno == ENOMEM) {
×
376
      return MFAIL;
377
    }
378
    if (hint_addr) {
×
379
      /* First, try linear probing. */
380
      if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
381
        hint_addr += 0x1000000;
×
382
        if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
×
383
          hint_addr = 0;
×
384
        continue;
×
385
      } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
×
386
        /* Next, try a no-hint probe to get back an ASLR address. */
387
        hint_addr = 0;
×
388
        continue;
×
389
      }
390
    }
391
    /* Finally, try pseudo-random probing. */
392
    if (LJ_UNLIKELY(hint_prng == 0)) {
×
393
      hint_prng = mmap_probe_seed();
×
394
    }
395
    /* The unsuitable address we got has some ASLR PRNG bits. */
396
    hint_addr ^= addr & ~((uintptr_t)(LJ_PAGESIZE-1));
×
397
    do {  /* The PRNG itself is very weak, but see above. */
×
398
      hint_prng = hint_prng * 1103515245 + 12345;
×
399
      hint_addr ^= hint_prng * (uintptr_t)LJ_PAGESIZE;
×
400
      hint_addr &= (((uintptr_t)1 << LJ_ALLOC_MBITS)-1);
×
401
    } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
×
402
  }
403
  errno = olderr;
×
404
  return MFAIL;
×
405
}
406

407
#endif
408

409
#if LJ_ALLOC_MMAP32
410

411
#if defined(__sun__)
412
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0x1000)
413
#else
414
#define LJ_ALLOC_MMAP32_START        ((uintptr_t)0)
415
#endif
416

417
static void *mmap_map32(size_t size)
418
{
419
#if LJ_ALLOC_MMAP_PROBE
420
  static int fallback = 0;
421
  if (fallback)
422
    return mmap_probe(size);
423
#endif
424
  {
425
    int olderr = errno;
426
#if LUAJIT_USE_ASAN
427
    size_t mem_size = size;
428
    size = align_up_size(size + TOTAL_REDZONE_SIZE, SIZE_ALIGNMENT);
429
    void *ptr = mmap((void *)asan_lower_address(), size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
430
#else
431
    void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
432
#endif
433

434
#if LUAJIT_USE_ASAN
435
    if (ptr != MFAIL)
436
      ptr = mark_memory_region(ptr, mem_size, size);
437

438
    size = mem_size;
439
#endif
440
    errno = olderr;
441
    /* This only allows 1GB on Linux. So fallback to probing to get 2GB. */
442
#if LJ_ALLOC_MMAP_PROBE
443
    if (ptr == MFAIL) {
444
      fallback = 1;
445
      return mmap_probe(size);
446
    }
447
#endif
448
    return ptr;
449
  }
450
}
451

452
#endif
453

454
#if LJ_ALLOC_MMAP32
455
#define CALL_MMAP(size)                mmap_map32(size)
456
#elif LJ_ALLOC_MMAP_PROBE
457
#define CALL_MMAP(size)                mmap_probe(size)
458
#else
459
static void *CALL_MMAP(size_t size)
460
{
461
  int olderr = errno;
462
#if LUAJIT_USE_ASAN
463
  size_t mem_size = size;
464
  size = align_up_size(size + TOTAL_REDZONE_SIZE, DEFAULT_GRANULARITY);
465
  void *ptr = mmap((void *)asan_lower_address(), size, MMAP_PROT, MMAP_FLAGS, -1, 0);
466
#else
467
  void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
468
#endif 
469
  errno = olderr;
470
#if LUAJIT_USE_ASAN
471
  ptr = mark_memory_region(ptr, mem_size, size);
472
#endif
473
  return ptr;
474
}
475
#endif
476

477
#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4
478

479
#include <sys/resource.h>
480

481
static void init_mmap(void)
482
{
483
  struct rlimit rlim;
484
  rlim.rlim_cur = rlim.rlim_max = 0x10000;
485
  setrlimit(RLIMIT_DATA, &rlim);  /* Ignore result. May fail later. */
486
}
487
#define INIT_MMAP()        init_mmap()
488

489
#endif
490

491
static int CALL_MUNMAP(void *ptr, size_t size)
3,326✔
492
{
493
  int olderr = errno;
3,326✔
494
#if LUAJIT_USE_ASAN
495
  memmove(ptr, ptr, size); /* self-move: let ASan report any poisoned byte in the block */
496
  size = asan_get_size(ptr, POISON_SIZE);
497
  ptr -= REDZONE_SIZE;
498
#endif
499
  int ret = munmap(ptr, size);
3,312✔
500
#if LUAJIT_USE_ASAN
501
  if (ret == 0) {
502
    ASAN_POISON_MEMORY_REGION(ptr, size);
503
  }
504
#endif
505
  errno = olderr;
3,326✔
506
  return ret;
3,326✔
507
}
508

509
#if LJ_ALLOC_MREMAP
510
/* Need to define _GNU_SOURCE to get the mremap prototype. */
511
static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
95✔
512
{
513
  int olderr = errno;
95✔
514
#if LUAJIT_USE_ASAN && !(LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64))
515
  void *new_ptr = mmap_probe(nsz);
516
  if (new_ptr != MFAIL) {
517
    size_t oms = asan_get_size(ptr, MEM_SIZE);
518
    memcpy(new_ptr, ptr, oms);
519
    munmap(ptr, osz);
520
    ptr = new_ptr;
521
  }
522
#else
523

524
#if LUAJIT_USE_ASAN
525
  void *old_ptr = ptr;
526
  size_t nms = nsz; /* new memory size */
527
  osz = asan_get_size(old_ptr, POISON_SIZE);
528
  nsz = align_up_size(nsz, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
529
  ptr -= REDZONE_SIZE;
530
#endif
531
  ptr = mremap(ptr, osz, nsz, flags);
190✔
532
#if LUAJIT_USE_ASAN
533
  if (ptr != MFAIL) { 
534
    /* mremap() may return the same address, so poison the old region before re-marking */
535
    ASAN_POISON_MEMORY_REGION(old_ptr - REDZONE_SIZE, osz);
536
    ptr = mark_memory_region(ptr, nms, nsz);
537
  }
538
#endif
539
#endif
540
  errno = olderr;
95✔
541
  return ptr;
95✔
542
}
543

544
#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
545
#define CALL_MREMAP_NOMOVE        0
546
#define CALL_MREMAP_MAYMOVE        1
547
#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
548
#define CALL_MREMAP_MV                CALL_MREMAP_NOMOVE
549
#else
550
#define CALL_MREMAP_MV                CALL_MREMAP_MAYMOVE
551
#endif
552
#endif
553

554
#endif
555

556

557
#ifndef INIT_MMAP
558
#define INIT_MMAP()                ((void)0)
559
#endif
560

561
#ifndef DIRECT_MMAP
562
#define DIRECT_MMAP(s)                CALL_MMAP(s)
563
#endif
564

565
#ifndef CALL_MREMAP
566
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
567
#endif
568

569
/* -----------------------  Chunk representations ------------------------ */
570

571
struct malloc_chunk {
572
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
573
  size_t               head;       /* Size and inuse bits. */
574
  struct malloc_chunk *fd;         /* double links -- used only if free. */
575
  struct malloc_chunk *bk;
576
};
577

578
typedef struct malloc_chunk  mchunk;
579
typedef struct malloc_chunk *mchunkptr;
580
typedef struct malloc_chunk *sbinptr;  /* The type of bins of chunks */
581
typedef size_t bindex_t;               /* Described below */
582
typedef unsigned int binmap_t;         /* Described below */
583
typedef unsigned int flag_t;           /* The type of various bit flag sets */
584

585
/* ------------------- Chunks sizes and alignments ----------------------- */
586

587
#define MCHUNK_SIZE                (sizeof(mchunk))
588

589
#define CHUNK_OVERHEAD                (SIZE_T_SIZE)
590

591
/* Direct chunks need a second word of overhead ... */
592
#define DIRECT_CHUNK_OVERHEAD        (TWO_SIZE_T_SIZES)
593
/* ... and additional padding for fake next-chunk at foot */
594
#define DIRECT_FOOT_PAD                (FOUR_SIZE_T_SIZES)
595

596
/* The smallest size we can malloc is an aligned minimal chunk */
597
#define MIN_CHUNK_SIZE\
598
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
599

600
#if LUAJIT_USE_ASAN
601
/* conversion from malloc headers to user pointers, and back */
602
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES + REDZONE_SIZE))
603
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES - REDZONE_SIZE))
604
#else
605
/* conversion from malloc headers to user pointers, and back */
606
#define chunk2mem(p)                ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
607
#define mem2chunk(mem)                ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
608
#endif
609
/* chunk associated with aligned address A */
610
#define align_as_chunk(A)        (mchunkptr)((A) + align_offset(chunk2mem(A)))
611

612
/* Bounds on request (not chunk) sizes. */
613
#define MAX_REQUEST                ((~MIN_CHUNK_SIZE+1) << 2)
614
#define MIN_REQUEST                (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
615

616
/* pad request bytes into a usable size */
617
#define pad_request(req) \
618
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
619

620
/* pad request, checking for minimum (but not maximum) */
621
#define request2size(req) \
622
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
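/* Worked example on a 64-bit build (SIZE_T_SIZE == 8, so CHUNK_OVERHEAD == 8,
** MIN_CHUNK_SIZE == 32 and MIN_REQUEST == 23):
**   request2size(16) == 32   (below MIN_REQUEST, rounded up to MIN_CHUNK_SIZE)
**   request2size(40) == 48   ((40 + 8 + 7) & ~7)
*/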
623

624
/* ------------------ Operations on head and foot fields ----------------- */
625

626
#define PINUSE_BIT                (SIZE_T_ONE)
627
#define CINUSE_BIT                (SIZE_T_TWO)
628
#define INUSE_BITS                (PINUSE_BIT|CINUSE_BIT)
629

630
/* Head value for fenceposts */
631
#define FENCEPOST_HEAD                (INUSE_BITS|SIZE_T_SIZE)
632

633
/* extraction of fields from head words */
634
#define cinuse(p)                ((p)->head & CINUSE_BIT)
635
#define pinuse(p)                ((p)->head & PINUSE_BIT)
636
#define chunksize(p)                ((p)->head & ~(INUSE_BITS))
637

638
#define clear_pinuse(p)                ((p)->head &= ~PINUSE_BIT)
639
#define clear_cinuse(p)                ((p)->head &= ~CINUSE_BIT)
640

641
/* Treat space at ptr +/- offset as a chunk */
642
#define chunk_plus_offset(p, s)                ((mchunkptr)(((char *)(p)) + (s)))
643
#define chunk_minus_offset(p, s)        ((mchunkptr)(((char *)(p)) - (s)))
644

645
/* Ptr to next or previous physical malloc_chunk. */
646
#define next_chunk(p)        ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
647
#define prev_chunk(p)        ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
648

649
/* extract next chunk's pinuse bit */
650
#define next_pinuse(p)        ((next_chunk(p)->head) & PINUSE_BIT)
651

652
/* Get/set size at footer */
653
#define get_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot)
654
#define set_foot(p, s)        (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
655

656
/* Set size, pinuse bit, and foot */
657
#define set_size_and_pinuse_of_free_chunk(p, s)\
658
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
659

660
/* Set size, pinuse bit, foot, and clear next pinuse */
661
#define set_free_with_pinuse(p, s, n)\
662
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
663

664
#define is_direct(p)\
665
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
666

667
/* Get the internal overhead associated with chunk p */
668
#define overhead_for(p)\
669
 (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
670

671
/* ---------------------- Overlaid data structures ----------------------- */
672

673
struct malloc_tree_chunk {
674
  /* The first four fields must be compatible with malloc_chunk */
675
  size_t                    prev_foot;
676
  size_t                    head;
677
  struct malloc_tree_chunk *fd;
678
  struct malloc_tree_chunk *bk;
679

680
  struct malloc_tree_chunk *child[2];
681
  struct malloc_tree_chunk *parent;
682
  bindex_t                  index;
683
};
684

685
typedef struct malloc_tree_chunk  tchunk;
686
typedef struct malloc_tree_chunk *tchunkptr;
687
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
688

689
/* A little helper macro for trees */
690
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
691

692
/* ----------------------------- Segments -------------------------------- */
693

694
struct malloc_segment {
695
  char        *base;             /* base address */
696
  size_t       size;             /* allocated size */
697
  struct malloc_segment *next;   /* ptr to next segment */
698
};
699

700
typedef struct malloc_segment  msegment;
701
typedef struct malloc_segment *msegmentptr;
702

703
/* ---------------------------- malloc_state ----------------------------- */
704

705
/* Bin types, widths and sizes */
706
#define NSMALLBINS                (32U)
707
#define NTREEBINS                (32U)
708
#define SMALLBIN_SHIFT                (3U)
709
#define SMALLBIN_WIDTH                (SIZE_T_ONE << SMALLBIN_SHIFT)
710
#define TREEBIN_SHIFT                (8U)
711
#define MIN_LARGE_SIZE                (SIZE_T_ONE << TREEBIN_SHIFT)
712
#define MAX_SMALL_SIZE                (MIN_LARGE_SIZE - SIZE_T_ONE)
713
#define MAX_SMALL_REQUEST  (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
714

715
struct malloc_state {
716
  binmap_t   smallmap;
717
  binmap_t   treemap;
718
  size_t     dvsize;
719
  size_t     topsize;
720
  mchunkptr  dv;
721
  mchunkptr  top;
722
  size_t     trim_check;
723
  size_t     release_checks;
724
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
725
  tbinptr    treebins[NTREEBINS];
726
  msegment   seg;
727
};
728

729
typedef struct malloc_state *mstate;
730

731
#define is_initialized(M)        ((M)->top != 0)
732

733
/* -------------------------- system alloc setup ------------------------- */
734

735
/* page-align a size */
736
#define page_align(S)\
737
 (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
738

739
/* granularity-align a size */
740
#define granularity_align(S)\
741
  (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
742
   & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
743

744
#if LJ_TARGET_WINDOWS
745
#define mmap_align(S)        granularity_align(S)
746
#else
747
#define mmap_align(S)        page_align(S)
748
#endif
749

750
/*  True if segment S holds address A */
751
#define segment_holds(S, A)\
752
  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
753

754
/* Return segment holding given address */
755
static msegmentptr segment_holding(mstate m, char *addr)
70✔
756
{
757
  msegmentptr sp = &m->seg;
70✔
758
  for (;;) {
70✔
759
    if (addr >= sp->base && addr < sp->base + sp->size)
70✔
760
      return sp;
761
    if ((sp = sp->next) == 0)
×
762
      return 0;
763
  }
764
}
765

766
/* Return true if segment contains a segment link */
767
static int has_segment_link(mstate m, msegmentptr ss)
20✔
768
{
769
  msegmentptr sp = &m->seg;
20✔
770
  for (;;) {
61✔
771
    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
61✔
772
      return 1;
773
    if ((sp = sp->next) == 0)
61✔
774
      return 0;
775
  }
776
}
777

778
/*
779
  TOP_FOOT_SIZE is padding at the end of a segment, including space
780
  that may be needed to place segment records and fenceposts when new
781
  noncontiguous segments are added.
782
*/
783
#define TOP_FOOT_SIZE\
784
  (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
785

786
/* ---------------------------- Indexing Bins ---------------------------- */
787

788
#define is_small(s)                (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
789
#define small_index(s)                ((s)  >> SMALLBIN_SHIFT)
790
#define small_index2size(i)        ((i)  << SMALLBIN_SHIFT)
791
#define MIN_SMALL_INDEX                (small_index(MIN_CHUNK_SIZE))
792

793
/* addressing by index. See above about smallbin repositioning */
794
#define smallbin_at(M, i)        ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
795
#define treebin_at(M,i)                (&((M)->treebins[i]))
796

797
/* assign tree index for size S to variable I */
798
#define compute_tree_index(S, I)\
799
{\
800
  unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
801
  if (X == 0) {\
802
    I = 0;\
803
  } else if (X > 0xFFFF) {\
804
    I = NTREEBINS-1;\
805
  } else {\
806
    unsigned int K = lj_fls(X);\
807
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
808
  }\
809
}
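/* Worked example: for S == 768 (0x300), X == 3, K == lj_fls(3) == 1, so
** I == (1 << 1) + ((768 >> 8) & 1) == 3, i.e. chunks in [768, 1024) share
** treebin 3 while [512, 768) maps to treebin 2.
*/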
810

811
/* Bit representing maximum resolved size in a treebin at i */
812
#define bit_for_tree_index(i) \
813
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
814

815
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
816
#define leftshift_for_tree_index(i) \
817
   ((i == NTREEBINS-1)? 0 : \
818
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
819

820
/* The size of the smallest chunk held in bin with index i */
821
#define minsize_for_tree_index(i) \
822
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
823
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
824

825
/* ------------------------ Operations on bin maps ----------------------- */
826

827
/* bit corresponding to given index */
828
#define idx2bit(i)                ((binmap_t)(1) << (i))
829

830
/* Mark/Clear bits with given index */
831
#define mark_smallmap(M,i)        ((M)->smallmap |=  idx2bit(i))
832
#define clear_smallmap(M,i)        ((M)->smallmap &= ~idx2bit(i))
833
#define smallmap_is_marked(M,i)        ((M)->smallmap &   idx2bit(i))
834

835
#define mark_treemap(M,i)        ((M)->treemap  |=  idx2bit(i))
836
#define clear_treemap(M,i)        ((M)->treemap  &= ~idx2bit(i))
837
#define treemap_is_marked(M,i)        ((M)->treemap  &   idx2bit(i))
838

839
/* mask with all bits to left of least bit of x on */
840
#define left_bits(x)                ((x<<1) | (~(x<<1)+1))
841

842
/* Set cinuse bit and pinuse bit of next chunk */
843
#define set_inuse(M,p,s)\
844
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
845
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
846

847
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
848
#define set_inuse_and_pinuse(M,p,s)\
849
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
850
  ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
851

852
/* Set size, cinuse and pinuse bit of this chunk */
853
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
854
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
855

856
/* ----------------------- Operations on smallbins ----------------------- */
857

858
/* Link a free chunk into a smallbin  */
859
#define insert_small_chunk(M, P, S) {\
860
  bindex_t I = small_index(S);\
861
  mchunkptr B = smallbin_at(M, I);\
862
  mchunkptr F = B;\
863
  if (!smallmap_is_marked(M, I))\
864
    mark_smallmap(M, I);\
865
  else\
866
    F = B->fd;\
867
  B->fd = P;\
868
  F->bk = P;\
869
  P->fd = F;\
870
  P->bk = B;\
871
}
872

873
/* Unlink a chunk from a smallbin  */
874
#define unlink_small_chunk(M, P, S) {\
875
  mchunkptr F = P->fd;\
876
  mchunkptr B = P->bk;\
877
  bindex_t I = small_index(S);\
878
  if (F == B) {\
879
    clear_smallmap(M, I);\
880
  } else {\
881
    F->bk = B;\
882
    B->fd = F;\
883
  }\
884
}
885

886
/* Unlink the first chunk from a smallbin */
887
#define unlink_first_small_chunk(M, B, P, I) {\
888
  mchunkptr F = P->fd;\
889
  if (B == F) {\
890
    clear_smallmap(M, I);\
891
  } else {\
892
    B->fd = F;\
893
    F->bk = B;\
894
  }\
895
}
896

897
/* Replace dv node, binning the old one */
898
/* Used only when dvsize known to be small */
899
#define replace_dv(M, P, S) {\
900
  size_t DVS = M->dvsize;\
901
  if (DVS != 0) {\
902
    mchunkptr DV = M->dv;\
903
    insert_small_chunk(M, DV, DVS);\
904
  }\
905
  M->dvsize = S;\
906
  M->dv = P;\
907
}
908

909
/* ------------------------- Operations on trees ------------------------- */
910

911
/* Insert chunk into tree */
912
#define insert_large_chunk(M, X, S) {\
913
  tbinptr *H;\
914
  bindex_t I;\
915
  compute_tree_index(S, I);\
916
  H = treebin_at(M, I);\
917
  X->index = I;\
918
  X->child[0] = X->child[1] = 0;\
919
  if (!treemap_is_marked(M, I)) {\
920
    mark_treemap(M, I);\
921
    *H = X;\
922
    X->parent = (tchunkptr)H;\
923
    X->fd = X->bk = X;\
924
  } else {\
925
    tchunkptr T = *H;\
926
    size_t K = S << leftshift_for_tree_index(I);\
927
    for (;;) {\
928
      if (chunksize(T) != S) {\
929
        tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
930
        K <<= 1;\
931
        if (*C != 0) {\
932
          T = *C;\
933
        } else {\
934
          *C = X;\
935
          X->parent = T;\
936
          X->fd = X->bk = X;\
937
          break;\
938
        }\
939
      } else {\
940
        tchunkptr F = T->fd;\
941
        T->fd = F->bk = X;\
942
        X->fd = F;\
943
        X->bk = T;\
944
        X->parent = 0;\
945
        break;\
946
      }\
947
    }\
948
  }\
949
}
950

951
#define unlink_large_chunk(M, X) {\
952
  tchunkptr XP = X->parent;\
953
  tchunkptr R;\
954
  if (X->bk != X) {\
955
    tchunkptr F = X->fd;\
956
    R = X->bk;\
957
    F->bk = R;\
958
    R->fd = F;\
959
  } else {\
960
    tchunkptr *RP;\
961
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
962
        ((R = *(RP = &(X->child[0]))) != 0)) {\
963
      tchunkptr *CP;\
964
      while ((*(CP = &(R->child[1])) != 0) ||\
965
             (*(CP = &(R->child[0])) != 0)) {\
966
        R = *(RP = CP);\
967
      }\
968
      *RP = 0;\
969
    }\
970
  }\
971
  if (XP != 0) {\
972
    tbinptr *H = treebin_at(M, X->index);\
973
    if (X == *H) {\
974
      if ((*H = R) == 0) \
975
        clear_treemap(M, X->index);\
976
    } else {\
977
      if (XP->child[0] == X) \
978
        XP->child[0] = R;\
979
      else \
980
        XP->child[1] = R;\
981
    }\
982
    if (R != 0) {\
983
      tchunkptr C0, C1;\
984
      R->parent = XP;\
985
      if ((C0 = X->child[0]) != 0) {\
986
        R->child[0] = C0;\
987
        C0->parent = R;\
988
      }\
989
      if ((C1 = X->child[1]) != 0) {\
990
        R->child[1] = C1;\
991
        C1->parent = R;\
992
      }\
993
    }\
994
  }\
995
}
996

997
/* Relays to large vs small bin operations */
998

999
#define insert_chunk(M, P, S)\
1000
  if (is_small(S)) { insert_small_chunk(M, P, S)\
1001
  } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
1002

1003
#define unlink_chunk(M, P, S)\
1004
  if (is_small(S)) { unlink_small_chunk(M, P, S)\
1005
  } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
1006

1007
/* -----------------------  Direct-mmapping chunks ----------------------- */
1008

1009
static void *direct_alloc(size_t nb)
2,960✔
1010
{
1011
#if LUAJIT_USE_ASAN
1012
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK) - TOTAL_REDZONE_SIZE;
1013
#else
1014
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2,960✔
1015
#endif
1016
  if (LJ_LIKELY(mmsize > nb)) {     /* Check for wrap around 0 */
2,960✔
1017
    char *mm = (char *)(DIRECT_MMAP(mmsize));
2,960✔
1018
    if (mm != CMFAIL) {
2,960✔
1019
      size_t offset = align_offset(chunk2mem(mm));
2,960✔
1020
      size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
2,960✔
1021
      mchunkptr p = (mchunkptr)(mm + offset);
2,960✔
1022
      p->prev_foot = offset | IS_DIRECT_BIT;
2,960✔
1023
      p->head = psize|CINUSE_BIT;
2,960✔
1024
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2,960✔
1025
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2,960✔
1026
      return chunk2mem(p);
2,960✔
1027
    }
1028
  }
1029
  return NULL;
1030
}
1031

1032
static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
109✔
1033
{
1034
  size_t oldsize = chunksize(oldp);
109✔
1035
  if (is_small(nb)) /* Can't shrink direct regions below small size */
109✔
1036
    return NULL;
1037
  /* Keep old chunk if big enough but not too big */
1038
  if (oldsize >= nb + SIZE_T_SIZE &&
106✔
1039
      (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
61✔
1040
    return oldp;
1041
  } else {
1042
    size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
75✔
1043
    size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
75✔
1044
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
75✔
1045
    char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
75✔
1046
                                   oldmmsize, newmmsize, CALL_MREMAP_MV);
1047
    if (cp != CMFAIL) {
75✔
1048
      mchunkptr newp = (mchunkptr)(cp + offset);
75✔
1049
      size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
75✔
1050
      newp->head = psize|CINUSE_BIT;
75✔
1051
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
75✔
1052
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
75✔
1053
      return newp;
75✔
1054
    }
1055
  }
1056
  return NULL;
1057
}
1058

1059
/* -------------------------- mspace management -------------------------- */
1060

1061
/* Initialize top chunk and its size */
1062
static void init_top(mstate m, mchunkptr p, size_t psize)
454✔
1063
{
1064
  /* Ensure alignment */
1065
  void *t = chunk2mem(p);
454✔
1066
#if LUAJIT_USE_ASAN
1067
  t -= REDZONE_SIZE;
1068
#endif
1069
  size_t offset = align_offset(t);
×
1070

1071
  p = (mchunkptr)((char *)p + offset);
454✔
1072
  psize -= offset;
454✔
1073

1074
  m->top = p;
454✔
1075
  m->topsize = psize;
454✔
1076
  p->head = psize | PINUSE_BIT;
454✔
1077
  /* set size of fake trailing chunk holding overhead space only once */
1078
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
454✔
1079
  m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
454✔
1080
}
78✔
1081

1082
/* Initialize bins for a new mstate that is otherwise zeroed out */
1083
static void init_bins(mstate m)
326✔
1084
{
1085
  /* Establish circular links for smallbins */
1086
  bindex_t i;
326✔
1087
  for (i = 0; i < NSMALLBINS; i++) {
10,758✔
1088
    sbinptr bin = smallbin_at(m,i);
10,432✔
1089
    bin->fd = bin->bk = bin;
10,432✔
1090
  }
1091
}
1092

1093
/* Allocate chunk and prepend remainder with chunk in successor base. */
1094
static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
17,839✔
1095
{
1096
  mchunkptr p = align_as_chunk(newbase);
17,839✔
1097
  mchunkptr oldfirst = align_as_chunk(oldbase);
17,839✔
1098
  size_t psize = (size_t)((char *)oldfirst - (char *)p);
17,839✔
1099
  mchunkptr q = chunk_plus_offset(p, nb);
17,839✔
1100
  size_t qsize = psize - nb;
17,839✔
1101
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
17,839✔
1102

1103
  /* consolidate remainder with first chunk of old base */
1104
  if (oldfirst == m->top) {
17,839✔
1105
    size_t tsize = m->topsize += qsize;
×
1106
    m->top = q;
×
1107
    q->head = tsize | PINUSE_BIT;
×
1108
  } else if (oldfirst == m->dv) {
17,839✔
1109
    size_t dsize = m->dvsize += qsize;
×
1110
    m->dv = q;
×
1111
    set_size_and_pinuse_of_free_chunk(q, dsize);
×
1112
  } else {
1113
    if (!cinuse(oldfirst)) {
17,839✔
1114
      size_t nsize = chunksize(oldfirst);
7✔
1115
      unlink_chunk(m, oldfirst, nsize);
8✔
1116
      oldfirst = chunk_plus_offset(oldfirst, nsize);
7✔
1117
      qsize += nsize;
7✔
1118
    }
1119
    set_free_with_pinuse(q, qsize, oldfirst);
17,839✔
1120
    insert_chunk(m, q, qsize);
17,839✔
1121
  }
1122

1123
  return chunk2mem(p);
17,839✔
1124
}
1125

1126
/* Add a segment to hold a new noncontiguous region */
1127
static void add_segment(mstate m, char *tbase, size_t tsize)
50✔
1128
{
1129
  /* Determine locations and sizes of segment, fenceposts, old top */
1130
  char *old_top = (char *)m->top;
50✔
1131
  msegmentptr oldsp = segment_holding(m, old_top);
50✔
1132
#if LUAJIT_USE_ASAN
1133
  ASAN_UNPOISON_MEMORY_REGION(oldsp, sizeof(struct malloc_segment));
1134
#endif
1135
  char *old_end = oldsp->base + oldsp->size;
50✔
1136
  size_t ssize = pad_request(sizeof(struct malloc_segment));
50✔
1137
  char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
50✔
1138
  size_t offset = align_offset(chunk2mem(rawsp));
50✔
1139
  char *asp = rawsp + offset;
50✔
1140
  char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
50✔
1141
  mchunkptr sp = (mchunkptr)csp;
50✔
1142
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
50✔
1143
#if LUAJIT_USE_ASAN
1144
  ss = (msegmentptr)((void *)ss - REDZONE_SIZE);
1145
#endif
1146
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
50✔
1147
  mchunkptr p = tnext;
50✔
1148

1149
  /* reset top to new space */
1150
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
50✔
1151

1152
  /* Set up segment record */
1153
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
50✔
1154
  *ss = m->seg; /* Push current record */
50✔
1155
  m->seg.base = tbase;
50✔
1156
  m->seg.size = tsize;
50✔
1157
  m->seg.next = ss;
50✔
1158

1159
  /* Insert trailing fenceposts */
1160
  for (;;) {
180✔
1161
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
180✔
1162
    p->head = FENCEPOST_HEAD;
180✔
1163
    if ((char *)(&(nextp->head)) < old_end)
180✔
1164
      p = nextp;
1165
    else
1166
      break;
1167
  }
1168

1169
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1170
  if (csp != old_top) {
50✔
1171
    mchunkptr q = (mchunkptr)old_top;
37✔
1172
    size_t psize = (size_t)(csp - old_top);
37✔
1173
    mchunkptr tn = chunk_plus_offset(q, psize);
37✔
1174
    set_free_with_pinuse(q, psize, tn);
37✔
1175
    insert_chunk(m, q, psize);
53✔
1176
  }
1177
}
50✔
1178

1179
/* -------------------------- System allocation -------------------------- */
1180

1181
static void *alloc_sys(mstate m, size_t nb)
20,907✔
1182
{
1183
  char *tbase = CMFAIL;
20,907✔
1184
  size_t tsize = 0;
20,907✔
1185

1186
  /* Directly map large chunks */
1187
  if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
20,907✔
1188
    void *mem = direct_alloc(nb);
2,960✔
1189
    if (mem != 0)
2,960✔
1190
      return mem;
1191
  }
1192

1193
  {
1194
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
17,947✔
1195
#if LUAJIT_USE_ASAN
1196
    size_t rsize = granularity_align(req) - TOTAL_REDZONE_SIZE;
1197
#else
1198
    size_t rsize = granularity_align(req);
17,947✔
1199
#endif
1200
    if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
17,947✔
1201
      char *mp = (char *)(CALL_MMAP(rsize));
17,947✔
1202
      if (mp != CMFAIL) {
17,947✔
1203
        tbase = mp;
17,947✔
1204
        tsize = rsize;
17,947✔
1205
      }
1206
    }
1207
  }
1208

1209
  if (tbase != CMFAIL) {
17,947✔
1210
    msegmentptr sp = &m->seg;
17,947✔
1211
    /* Try to merge with an existing segment */
1212
    while (sp != 0 && tbase != sp->base + sp->size)
238,041✔
1213
      sp = sp->next;
220,094✔
1214
    if (sp != 0 && segment_holds(sp, m->top)) { /* append */
17,947✔
1215
      sp->size += tsize;
58✔
1216
      init_top(m, m->top, m->topsize + tsize);
58✔
1217
    } else {
1218
      sp = &m->seg;
1219
      while (sp != 0 && sp->base != tbase + tsize)
30,489✔
1220
        sp = sp->next;
12,600✔
1221
      if (sp != 0) {
17,889✔
1222
        char *oldbase = sp->base;
17,839✔
1223
        sp->base = tbase;
17,839✔
1224
        sp->size += tsize;
17,839✔
1225
        return prepend_alloc(m, tbase, oldbase, nb);
17,839✔
1226
      } else {
1227
        add_segment(m, tbase, tsize);
50✔
1228
      }
1229
    }
1230

1231
    if (nb < m->topsize) { /* Allocate from new or extended top space */
108✔
1232
      size_t rsize = m->topsize -= nb;
108✔
1233
      mchunkptr p = m->top;
108✔
1234
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
108✔
1235
      r->head = rsize | PINUSE_BIT;
108✔
1236
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
108✔
1237
      return chunk2mem(p);
108✔
1238
    }
1239
  }
1240

1241
  return NULL;
1242
}
1243

1244
/* -----------------------  system deallocation -------------------------- */
1245

1246
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1247
static size_t release_unused_segments(mstate m)
279,155✔
1248
{
1249
  size_t released = 0;
279,155✔
1250
  size_t nsegs = 0;
279,155✔
1251
  msegmentptr pred = &m->seg;
279,155✔
1252
  msegmentptr sp = pred->next;
279,155✔
1253
  while (sp != 0) {
3,755,711✔
1254
    char *base = sp->base;
3,476,556✔
1255
    size_t size = sp->size;
3,476,556✔
1256
    msegmentptr next = sp->next;
3,476,556✔
1257
    nsegs++;
3,476,556✔
1258
    {
1259
      mchunkptr p = align_as_chunk(base);
3,476,556✔
1260
      size_t psize = chunksize(p);
3,476,556✔
1261
      /* Can unmap if first chunk holds entire segment and not pinned */
1262
      if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
3,476,556✔
1263
        tchunkptr tp = (tchunkptr)p;
14✔
1264
        if (p == m->dv) {
14✔
1265
          m->dv = 0;
×
1266
          m->dvsize = 0;
×
1267
        } else {
1268
          unlink_large_chunk(m, tp);
14✔
1269
        }
1270
        if (CALL_MUNMAP(base, size) == 0) {
14✔
1271
          released += size;
14✔
1272
          /* unlink obsoleted record */
1273
          sp = pred;
14✔
1274
          sp->next = next;
14✔
1275
        } else { /* back out if cannot unmap */
1276
          insert_large_chunk(m, tp, psize);
×
1277
        }
1278
      }
1279
    }
1280
    pred = sp;
1281
    sp = next;
1282
  }
1283
  /* Reset check counter */
1284
  m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
279,155✔
1285
                      nsegs : MAX_RELEASE_CHECK_RATE;
279,155✔
1286
  return released;
279,155✔
1287
}
1288

1289
static int alloc_trim(mstate m, size_t pad)
20✔
1290
{
1291
  size_t released = 0;
20✔
1292
  if (pad < MAX_REQUEST && is_initialized(m)) {
20✔
1293
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
20✔
1294

1295
    if (m->topsize > pad) {
20✔
1296
      /* Shrink top space in granularity-size units, keeping at least one */
1297
      size_t unit = DEFAULT_GRANULARITY;
20✔
1298
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
20✔
1299
                      SIZE_T_ONE) * unit;
1300
      msegmentptr sp = segment_holding(m, (char *)m->top);
20✔
1301

1302
      if (sp->size >= extra &&
20✔
1303
          !has_segment_link(m, sp)) { /* can't shrink if pinned */
40✔
1304
        size_t newsize = sp->size - extra;
20✔
1305
        /* Prefer mremap, fall back to munmap */
1306
        if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
20✔
1307
            (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
×
1308
          released = extra;
20✔
1309
        }
1310
      }
1311

1312
      if (released != 0) {
20✔
1313
        sp->size -= released;
20✔
1314
        init_top(m, m->top, m->topsize - released);
20✔
1315
      }
1316
    }
1317

1318
    /* Unmap any unused mmapped segments */
1319
    released += release_unused_segments(m);
20✔
1320

1321
    /* On failure, disable autotrim to avoid repeated failed future calls */
1322
    if (released == 0 && m->topsize > m->trim_check)
20✔
1323
      m->trim_check = MAX_SIZE_T;
×
1324
  }
1325

1326
  return (released != 0)? 1 : 0;
20✔
1327
}
1328

1329
/* ---------------------------- malloc support --------------------------- */
1330

1331
/* allocate a large request from the best fitting chunk in a treebin */
1332
static void *tmalloc_large(mstate m, size_t nb)
284,371✔
1333
{
1334
  tchunkptr v = 0;
284,371✔
1335
  size_t rsize = ~nb+1; /* Unsigned negation */
284,371✔
1336
  tchunkptr t;
284,371✔
1337
  bindex_t idx;
284,371✔
1338
  compute_tree_index(nb, idx);
284,371✔
1339

1340
  if ((t = *treebin_at(m, idx)) != 0) {
284,371✔
1341
    /* Traverse tree for this bin looking for node with size == nb */
1342
    size_t sizebits = nb << leftshift_for_tree_index(idx);
125,490✔
1343
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
125,490✔
1344
    for (;;) {
254,130✔
1345
      tchunkptr rt;
189,810✔
1346
      size_t trem = chunksize(t) - nb;
189,810✔
1347
      if (trem < rsize) {
189,810✔
1348
        v = t;
98,453✔
1349
        if ((rsize = trem) == 0)
98,453✔
1350
          break;
1351
      }
1352
      rt = t->child[1];
170,369✔
1353
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
170,369✔
1354
      if (rt != 0 && rt != t)
170,369✔
1355
        rst = rt;
54,724✔
1356
      if (t == 0) {
170,369✔
1357
        t = rst; /* set t to least subtree holding sizes > nb */
1358
        break;
1359
      }
1360
      sizebits <<= 1;
64,320✔
1361
    }
1362
  }
1363

1364
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
284,371✔
1365
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
221,509✔
1366
    if (leftbits != 0)
221,509✔
1367
      t = *treebin_at(m, lj_ffs(leftbits));
211,233✔
1368
  }
1369

1370
  while (t != 0) { /* find smallest of tree or subtree */
641,971✔
1371
    size_t trem = chunksize(t) - nb;
357,600✔
1372
    if (trem < rsize) {
357,600✔
1373
      rsize = trem;
283,695✔
1374
      v = t;
283,695✔
1375
    }
1376
    t = leftmost_child(t);
357,600✔
1377
  }
1378

1379
  /*  If dv is a better fit, return NULL so malloc will use it */
1380
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
284,371✔
1381
    mchunkptr r = chunk_plus_offset(v, nb);
238,108✔
1382
    unlink_large_chunk(m, v);
251,105✔
1383
    if (rsize < MIN_CHUNK_SIZE) {
238,108✔
1384
      set_inuse_and_pinuse(m, v, (rsize + nb));
33,701✔
1385
    } else {
1386
      set_size_and_pinuse_of_inuse_chunk(m, v, nb);
204,407✔
1387
      set_size_and_pinuse_of_free_chunk(r, rsize);
204,407✔
1388
      insert_chunk(m, r, rsize);
231,800✔
1389
    }
1390
    return chunk2mem(v);
238,108✔
1391
  }
1392
  return NULL;
1393
}
1394

1395
/* allocate a small request from the best fitting chunk in a treebin */
1396
static void *tmalloc_small(mstate m, size_t nb)
150,701✔
1397
{
1398
  tchunkptr t, v;
150,701✔
1399
  mchunkptr r;
150,701✔
1400
  size_t rsize;
150,701✔
1401
  bindex_t i = lj_ffs(m->treemap);
150,701✔
1402

1403
  v = t = *treebin_at(m, i);
150,701✔
1404
  rsize = chunksize(t) - nb;
150,701✔
1405

1406
  while ((t = leftmost_child(t)) != 0) {
345,836✔
1407
    size_t trem = chunksize(t) - nb;
195,135✔
1408
    if (trem < rsize) {
195,135✔
1409
      rsize = trem;
101,215✔
1410
      v = t;
101,215✔
1411
    }
1412
  }
1413

1414
  r = chunk_plus_offset(v, nb);
150,701✔
1415
  unlink_large_chunk(m, v);
185,327✔
1416
  if (rsize < MIN_CHUNK_SIZE) {
150,701✔
1417
    set_inuse_and_pinuse(m, v, (rsize + nb));
21✔
1418
  } else {
1419
    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
150,680✔
1420
    set_size_and_pinuse_of_free_chunk(r, rsize);
150,680✔
1421
    replace_dv(m, r, rsize);
150,680✔
1422
  }
1423
  return chunk2mem(v);
150,701✔
1424
}
1425

1426
/* ----------------------------------------------------------------------- */
1427

1428
void *lj_alloc_create(void)
326✔
1429
{
1430
  size_t tsize = DEFAULT_GRANULARITY;
326✔
1431
#if LUAJIT_USE_ASAN
1432
  tsize -= TOTAL_REDZONE_SIZE;
1433
#endif
1434
  char *tbase;
326✔
1435
  INIT_MMAP();
326✔
1436
  tbase = (char *)(CALL_MMAP(tsize));
326✔
1437
  if (tbase != CMFAIL) {
326✔
1438
    size_t msize = pad_request(sizeof(struct malloc_state));
326✔
1439
    mchunkptr mn;
326✔
1440
#if LUAJIT_USE_ASAN
1441
    mchunkptr msp = (mchunkptr)(tbase + align_offset(chunk2mem(tbase) - REDZONE_SIZE));
1442
    mstate m = (mstate)(chunk2mem(msp) - REDZONE_SIZE);
1443
#else
1444
    mchunkptr msp = align_as_chunk(tbase);
326✔
1445
    mstate m = (mstate)(chunk2mem(msp));
326✔
1446
#endif
1447
    memset(m, 0, msize);
326✔
1448
    msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
326✔
1449
    m->seg.base = tbase;
326✔
1450
    m->seg.size = tsize;
326✔
1451
    m->release_checks = MAX_RELEASE_CHECK_RATE;
326✔
1452
    init_bins(m);
326✔
1453
#if LUAJIT_USE_ASAN
1454
    mn = next_chunk((mchunkptr)((char *)(m) - TWO_SIZE_T_SIZES));
1455
#else
1456
    mn = next_chunk(mem2chunk(m));
326✔
1457
#endif
1458
    init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
326✔
1459
    return m;
326✔
1460
  }
1461
  return NULL;
1462
}
1463

1464
void lj_alloc_destroy(void *msp)
316✔
1465
{
1466
  mstate ms = (mstate)msp;
316✔
1467
  msegmentptr sp = &ms->seg;
316✔
1468
  while (sp != 0) {
668✔
1469
    char *base = sp->base;
352✔
1470
    size_t size = sp->size;
352✔
1471
    sp = sp->next;
352✔
1472
#if LUAJIT_USE_ASAN
1473
    ASAN_UNPOISON_MEMORY_REGION(base, size);
1474
#endif
1475
    CALL_MUNMAP(base, size);
352✔
1476
  }
1477
}
316✔
1478

1479
static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
{
#if LUAJIT_USE_ASAN
  size_t mem_size = nsize;
  size_t poison_size = align_up_size(nsize, SIZE_ALIGNMENT) + TOTAL_REDZONE_SIZE;
  nsize = poison_size;
#endif
  mstate ms = (mstate)msp;
  void *mem;
  size_t nb;
  if (nsize <= MAX_SMALL_REQUEST) {
    bindex_t idx;
    binmap_t smallbits;
    nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
    idx = small_index(nb);
    smallbits = ms->smallmap >> idx;

    if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
      mchunkptr b, p;
      idx += ~smallbits & 1;       /* Uses next bin if idx empty */
      b = smallbin_at(ms, idx);
      p = b->fd;
      unlink_first_small_chunk(ms, b, p, idx);
      set_inuse_and_pinuse(ms, p, small_index2size(idx));
      mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
      mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
      return mem;
    } else if (nb > ms->dvsize) {
      if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
        mchunkptr b, p, r;
        size_t rsize;
        binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
        bindex_t i = lj_ffs(leftbits);
        b = smallbin_at(ms, i);
        p = b->fd;
        unlink_first_small_chunk(ms, b, p, i);
        rsize = small_index2size(i) - nb;
        /* Fit here cannot be remainderless if 4byte sizes */
        if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
          set_inuse_and_pinuse(ms, p, small_index2size(i));
        } else {
          set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
          r = chunk_plus_offset(p, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          replace_dv(ms, r, rsize);
        }
        mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
        mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
        return mem;
      } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN
        mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
        return mem;
      }
    }
  } else if (nsize >= MAX_REQUEST) {
    nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  } else {
    nb = pad_request(nsize);
    if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
#if LUAJIT_USE_ASAN
      mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
      return mem;
    }
  }

  if (nb <= ms->dvsize) {
    size_t rsize = ms->dvsize - nb;
    mchunkptr p = ms->dv;
    if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
      mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
      ms->dvsize = rsize;
      set_size_and_pinuse_of_free_chunk(r, rsize);
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    } else { /* exhaust dv */
      size_t dvs = ms->dvsize;
      ms->dvsize = 0;
      ms->dv = 0;
      set_inuse_and_pinuse(ms, p, dvs);
    }
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
    mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
    return mem;
  } else if (nb < ms->topsize) { /* Split top */
    size_t rsize = ms->topsize -= nb;
    mchunkptr p = ms->top;
    mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    mem = chunk2mem(p);
#if LUAJIT_USE_ASAN
    mem = mark_memory_region(mem - REDZONE_SIZE, mem_size, poison_size);
#endif
    return mem;
  }
#if LUAJIT_USE_ASAN
  return mark_memory_region(alloc_sys(ms, nb) - REDZONE_SIZE, mem_size, poison_size);
#else
  return alloc_sys(ms, nb);
#endif
}

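/*
** Free entry point. In the LUAJIT_USE_ASAN build the chunk is not returned
** to the allocator's free lists; the region is only re-poisoned (the
** self-memmove is presumably there so ASan can flag a block that is
** already freed or otherwise invalid). Otherwise the chunk is coalesced
** backward and forward with free neighbors, merged into the dv or top
** chunk when adjacent to them, and finally inserted into a smallbin or
** treebin. Direct (mmapped) chunks are unmapped immediately, and unused
** segments are released periodically.
*/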
static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
{
#if LUAJIT_USE_ASAN
  if (ptr != 0) {
    size_t mem_size = asan_get_size(ptr, MEM_SIZE);
    size_t poison_size = asan_get_size(ptr, POISON_SIZE);

    memmove(ptr, ptr, mem_size);
    ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE, poison_size);
  }
  return NULL;
#else
  if (ptr != 0) {
    mchunkptr p = mem2chunk(ptr);
    mstate fm = (mstate)msp;
    size_t psize = chunksize(p);
    mchunkptr next = chunk_plus_offset(p, psize);
    if (!pinuse(p)) {
      size_t prevsize = p->prev_foot;
      if ((prevsize & IS_DIRECT_BIT) != 0) {
        prevsize &= ~IS_DIRECT_BIT;
        psize += prevsize + DIRECT_FOOT_PAD;
        CALL_MUNMAP((char *)p - prevsize, psize);
        return NULL;
      } else {
        mchunkptr prev = chunk_minus_offset(p, prevsize);
        psize += prevsize;
        p = prev;
        /* consolidate backward */
        if (p != fm->dv) {
          unlink_chunk(fm, p, prevsize);
        } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
          fm->dvsize = psize;
          set_free_with_pinuse(p, psize, next);
          return NULL;
        }
      }
    }
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
        fm->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == fm->dv) {
          fm->dv = 0;
          fm->dvsize = 0;
        }
        if (tsize > fm->trim_check)
          alloc_trim(fm, 0);
        return NULL;
      } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
        fm->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return NULL;
      } else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(fm, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == fm->dv) {
          fm->dvsize = psize;
          return NULL;
        }
      }
    } else {
      set_free_with_pinuse(p, psize, next);
    }

    if (is_small(psize)) {
      insert_small_chunk(fm, p, psize);
    } else {
      tchunkptr tp = (tchunkptr)p;
      insert_large_chunk(fm, tp, psize);
      if (--fm->release_checks == 0)
        release_unused_segments(fm);
    }
  }
  return NULL;
#endif
}

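/*
** Reallocation. The LUAJIT_USE_ASAN build always takes the
** malloc-copy-poison path so the old and new blocks get fresh redzones.
** The regular build first tries to resize in place: direct (mmapped)
** chunks via direct_resize(), shrinking when the old chunk is already big
** enough, or growing into an adjacent top chunk; otherwise it falls back
** to malloc-copy-free.
*/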
static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
{
#if LUAJIT_USE_ASAN
  if (nsize >= MAX_REQUEST)
    return NULL;

  mstate m = (mstate)msp;

  size_t mem_size = asan_get_size(ptr, MEM_SIZE);
  size_t poison_size = asan_get_size(ptr, POISON_SIZE);

  void *newmem = lj_alloc_malloc(m, nsize);

  if (newmem == NULL)
    return NULL;

  memcpy(newmem, ptr, nsize > mem_size ? mem_size : nsize);
  ASAN_POISON_MEMORY_REGION(ptr - REDZONE_SIZE, poison_size);
  return newmem;
#else
  if (nsize >= MAX_REQUEST) {
    return NULL;
  } else {
    mstate m = (mstate)msp;
    mchunkptr oldp = mem2chunk(ptr);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    size_t nb = request2size(nsize);

    /* Try to either shrink or extend into top. Else malloc-copy-free */
    if (is_direct(oldp)) {
      newp = direct_resize(oldp, nb);  /* this may return NULL. */
    } else if (oldsize >= nb) { /* already big enough */
      size_t rsize = oldsize - nb;
      newp = oldp;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr rem = chunk_plus_offset(newp, nb);
        set_inuse(m, newp, nb);
        set_inuse(m, rem, rsize);
        lj_alloc_free(m, chunk2mem(rem));
      }
    } else if (next == m->top && oldsize + m->topsize > nb) {
      /* Expand into top */
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
      mchunkptr newtop = chunk_plus_offset(oldp, nb);
      set_inuse(m, oldp, nb);
      newtop->head = newtopsize | PINUSE_BIT;
      m->top = newtop;
      m->topsize = newtopsize;
      newp = oldp;
    }

    if (newp != 0) {
      return chunk2mem(newp);
    } else {
      void *newmem = lj_alloc_malloc(m, nsize);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, ptr, oc < nsize ? oc : nsize);
        lj_alloc_free(m, ptr);
      }
      return newmem;
    }
  }
#endif
}

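/*
** Lua allocator callback (lua_Alloc signature): nsize == 0 frees ptr,
** ptr == NULL allocates nsize bytes, anything else reallocates.
** The osize hint is unused.
*/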
void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
{
  (void)osize;
  if (nsize == 0) {
    return lj_alloc_free(msp, ptr);
  } else if (ptr == NULL) {
    return lj_alloc_malloc(msp, nsize);
  } else {
    return lj_alloc_realloc(msp, ptr, nsize);
  }
}

#endif