tarantool / luajit / 6035198545

31 Aug 2023 08:55AM UTC coverage: 88.225% (+0.4%) from 87.822%
Build 6035198545 · push · github

Commit by fckxorg:
test: don't skip tool CLI flag for tarantool

That skipcond was introduced to work around obstacles in
LuaJIT's integration testing within Tarantool. Since the
required patch has landed in Tarantool master, the
skipcond is no longer necessary.

Related to tarantool/tarantool#5688

5340 of 5975 branches covered (89.37%)

Branch coverage included in aggregate %.

20495 of 23308 relevant lines covered (87.93%)

1297339.67 hits per line
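
As a cross-check on the aggregate figure: combining lines and branches gives (20495 + 5340) / (23308 + 5975) = 25835 / 29283 ≈ 88.225%, matching the headline coverage above.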

Source File

/src/lj_mcode.c (88.32% covered)

/*
** Machine code management.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}

#endif

#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW        PAGE_READWRITE
#define MCPROT_RX        PAGE_EXECUTE_READ
#define MCPROT_RWX       PAGE_EXECUTE_READWRITE

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = LJ_WIN_VALLOC((void *)hint, sz,
                          MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS        MAP_ANON
#endif

#define MCPROT_RW        (PROT_READ|PROT_WRITE)
#define MCPROT_RX        (PROT_READ|PROT_EXEC)
#define MCPROT_RWX       (PROT_READ|PROT_WRITE|PROT_EXEC)

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}

static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW        0
#define MCPROT_RX        0
#define MCPROT_RWX       0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN        MCPROT_RWX
#define MCPROT_RUN        MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN        MCPROT_RW
#define MCPROT_RUN        MCPROT_RX

/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
}

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}

#endif

/* -- MCode area allocation ----------------------------------------------- */

#if LJ_64
#define mcode_validptr(p)        (p)
#else
#define mcode_validptr(p)        ((p) && (uintptr_t)(p) < 0xffff0000)
#endif

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
                      ~(uintptr_t)0x0fffffffu) + 0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  /* Limit probing iterations, depending on the available pool size. */
  for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p) &&
          ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
        return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing 64K-aligned pseudo-random addresses. */
    do {
      hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16;
    } while (!(hint + sz < range+range));
    hint = target + hint - range;
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}

#else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#if defined(__OpenBSD__) || LJ_TARGET_UWP
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

#endif

/* -- MCode area management ----------------------------------------------- */

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
  J->mcbot = (MCode *)lj_err_register_mcode(J->mcarea, sz, (uint8_t *)J->mcbot);
}

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    size_t sz = ((MCLink *)mc)->size;
    lj_err_deregister_mcode(mc, sz, (uint8_t *)mc + sizeof(MCLink));
    mcode_free(J, mc, sz);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lj_assertJ(mc != NULL, "broken MCode area chain");
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}

#endif
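
For orientation, here is a minimal sketch of how a caller drives the reserve/commit transaction API above. This is a hypothetical caller, not LuaJIT source: the real call sites live in the trace assembler, and more_insns_to_emit()/emit_one_insn() are stand-ins for actual instruction emission.

/* Hypothetical caller of the MCode transaction API -- illustration only. */
#include "lj_jit.h"
#include "lj_mcode.h"

static int more_insns_to_emit(jit_State *J);          /* Stand-in condition. */
static MCode *emit_one_insn(jit_State *J, MCode *p);  /* Stand-in: writes below p. */

static void emit_trace_sketch(jit_State *J)
{
  MCode *lim;
  MCode *top = lj_mcode_reserve(J, &lim);  /* Area is now writable (MCPROT_GEN). */
  MCode *p = top;
  /* Machine code is emitted downwards, from mctop towards mcbot. */
  while (more_insns_to_emit(J)) {
    if (p - 16 < lim)  /* Too close to the bottom limit of the area. */
      lj_mcode_limiterr(J, (size_t)(top - p) + 16);  /* Throws; trace is retried. */
    p = emit_one_insn(J, p);
  }
  lj_mcode_commit(J, p);  /* Record the new top; area flips back to RX (MCPROT_RUN). */
}

On an abort path the assembler calls lj_mcode_abort(J) instead, which restores MCPROT_RUN without moving mctop.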