• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 6123546617

08 Sep 2023 03:23PM UTC coverage: 88.259% (+0.2%) from 88.109%
6123546617

push

github

fckxorg
Fix frame for more types of on-trace error messages.

Thanks to Maxim Kokryashkin.

(cherry picked from commit d5bbf9cdb)

This patch fixes the same issue with the frame as the previous
one, but now for the table overflow error in the `err_msgv`
function. The test for the problem uses a table of GC
finalizers, although they are not required to reproduce the
issue. They are only used to make the test as simple as possible.

Resolves tarantool/tarantool#562
Part of tarantool/tarantool#8825

5342 of 5971 branches covered (0.0%)

Branch coverage included in aggregate %.

3 of 3 new or added lines in 1 file covered. (100.0%)

20486 of 23293 relevant lines covered (87.95%)

2738237.75 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

94.85
/src/lj_mcode.c
1
/*
2
** Machine code management.
3
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
4
*/
5

6
#define lj_mcode_c
7
#define LUA_CORE
8

9
#include "lj_obj.h"
10
#if LJ_HASJIT
11
#include "lj_gc.h"
12
#include "lj_err.h"
13
#include "lj_jit.h"
14
#include "lj_mcode.h"
15
#include "lj_trace.h"
16
#include "lj_dispatch.h"
17
#endif
18
#if LJ_HASJIT || LJ_HASFFI
19
#include "lj_vm.h"
20
#endif
21

22
/* -- OS-specific functions ----------------------------------------------- */
23

24
#if LJ_HASJIT || LJ_HASFFI
25

26
/* Define this if you want to run LuaJIT with Valgrind. */
27
#ifdef LUAJIT_USE_VALGRIND
28
#include <valgrind/valgrind.h>
29
#endif
30

31
#if LJ_TARGET_IOS
32
void sys_icache_invalidate(void *start, size_t len);
33
#endif
34

35
/* Synchronize data/instruction cache after writing machine code.
** [start, end) delimits the freshly written code range.
*/
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  /* Tell Valgrind to discard any cached translations for this range. */
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);  /* No explicit cache sync needed on x86/x64. */
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
53

54
#endif
55

56
#if LJ_HASJIT
57

58
#if LJ_TARGET_WINDOWS
59

60
#define WIN32_LEAN_AND_MEAN
61
#include <windows.h>
62

63
#define MCPROT_RW        PAGE_READWRITE
64
#define MCPROT_RX        PAGE_EXECUTE_READ
65
#define MCPROT_RWX        PAGE_EXECUTE_READWRITE
66

67
/* Allocate committed memory, preferably at the given address hint.
** Returns NULL on failure when a hint was supplied; an unhinted
** failure raises a trace error instead (no fallback is possible).
*/
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = LJ_WIN_VALLOC((void *)hint, sz,
                          MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}
75

76
/* Release an area previously obtained from mcode_alloc_at. */
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);  /* MEM_RELEASE requires a size of 0. */
  VirtualFree(p, 0, MEM_RELEASE);
}
81

82
/* Change page protection of an area. Returns 0 on success, non-zero on
** failure (note the logical negation of the Windows API convention).
*/
static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
}
87

88
#elif LJ_TARGET_POSIX
89

90
#include <sys/mman.h>
91

92
#ifndef MAP_ANONYMOUS
93
#define MAP_ANONYMOUS        MAP_ANON
94
#endif
95

96
#define MCPROT_RW        (PROT_READ|PROT_WRITE)
97
#define MCPROT_RX        (PROT_READ|PROT_EXEC)
98
#define MCPROT_RWX        (PROT_READ|PROT_WRITE|PROT_EXEC)
99

100
/* Allocate anonymous memory, preferably at the given address hint.
** Returns NULL on failure when a hint was supplied; an unhinted
** failure raises a trace error instead (no fallback is possible).
*/
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *mem = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (mem != MAP_FAILED)
    return mem;
  if (!hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return NULL;
}
109

110
/* Unmap an area previously obtained from mcode_alloc_at. */
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}
115

116
/* Change page protection of an area.
** Returns 0 on success, non-zero on failure (mprotect convention).
*/
static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}
120

121
#elif LJ_64
122

123
#error "Missing OS support for explicit placement of executable memory"
124

125
#else
126

127
/* Fallback allocator. This will fail if memory is not executable by default. */
128
#define LUAJIT_UNPROTECT_MCODE
129
#define MCPROT_RW        0
130
#define MCPROT_RX        0
131
#define MCPROT_RWX        0
132

133
/* Fallback: plain GC-tracked allocation; hint and protection are ignored. */
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}
138

139
/* Free a fallback-allocated area through the GC-tracked allocator. */
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}
143

144
#endif
145

146
/* -- MCode area protection ----------------------------------------------- */
147

148
/* Define this ONLY if page protection twiddling becomes a bottleneck. */
149
#ifdef LUAJIT_UNPROTECT_MCODE
150

151
/* It's generally considered to be a potential security risk to have
152
** pages with simultaneous write *and* execute access in a process.
153
**
154
** Do not even think about using this mode for server processes or
155
** apps handling untrusted external data (such as a browser).
156
**
157
** The security risk is not in LuaJIT itself -- but if an adversary finds
158
** any *other* flaw in your C application logic, then any RWX memory page
159
** simplifies writing an exploit considerably.
160
*/
161
#define MCPROT_GEN        MCPROT_RWX
162
#define MCPROT_RUN        MCPROT_RWX
163

164
/* No-op: pages stay RWX for their whole lifetime in unprotected mode. */
static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}
168

169
#else
170

171
/* This is the default behaviour and much safer:
172
**
173
** Most of the time the memory pages holding machine code are executable,
174
** but NONE of them is writable.
175
**
176
** The current memory area is marked read-write (but NOT executable) only
177
** during the short time window while the assembler generates machine code.
178
*/
179
#define MCPROT_GEN        MCPROT_RW
180
#define MCPROT_RUN        MCPROT_RX
181

182
/* Protection twiddling failed. Probably due to kernel security.
** Invokes the installed panic handler (if any) with a JIT-protection
** error message, then terminates the process. Never returns.
*/
static LJ_NORET LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    /* Push the error message so the panic handler can report it. */
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
  exit(EXIT_FAILURE);  /* Not recoverable. */
}
193

194
/* Change protection of MCode area. */
195
static void mcode_protect(jit_State *J, int prot)
7,139✔
196
{
197
  if (J->mcprot != prot) {
7,139✔
198
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
4,282✔
199
      mcode_protfail(J);
1✔
200
    J->mcprot = prot;
4,281✔
201
  }
202
}
7,138✔
203

204
#endif
205

206
/* -- MCode area allocation ----------------------------------------------- */
207

208
#if LJ_64
209
#define mcode_validptr(p)        (p)
210
#else
211
#define mcode_validptr(p)        ((p) && (uintptr_t)(p) < 0xffff0000)
212
#endif
213

214
#ifdef LJ_TARGET_JUMPRANGE
215

216
/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
                      ~(uintptr_t)0x0fffffffu) + 0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  /* Limit probing iterations, depending on the available pool size. */
  for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      /* Keep the area only if it lies within jump range of the target.
      ** The unsigned subtractions check both directions at once.
      */
      if (mcode_validptr(p) &&
          ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
        return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing 64K-aligned pseudo-random addresses. */
    do {
      hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16;
    } while (!(hint + sz < range+range));
    hint = target + hint - range;
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
253

254
#else
255

256
/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#if defined(__OpenBSD__) || LJ_TARGET_UWP
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  /* Immediately switch to the writable generation protection; if that
  ** fails the area is unusable, so free it and report failure.
  */
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}
271

272
#endif
273

274
/* -- MCode area management ----------------------------------------------- */
275

276
/* Allocate a new MCode area and link it in front of the area chain. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;  /* Param is in KB. */
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);  /* Page-align. */
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;  /* A fresh area starts out writable. */
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  /* The MCLink header at the bottom of each area chains all areas. */
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
  J->mcbot = (MCode *)lj_err_register_mcode(J->mcarea, sz, (uint8_t *)J->mcbot);
}
292

293
/* Free all MCode areas. */
294
void lj_mcode_free(jit_State *J)
516✔
295
{
296
  MCode *mc = J->mcarea;
516✔
297
  J->mcarea = NULL;
516✔
298
  J->szallmcarea = 0;
516✔
299
  while (mc) {
833✔
300
    MCode *next = ((MCLink *)mc)->next;
317✔
301
    size_t sz = ((MCLink *)mc)->size;
317✔
302
    lj_err_deregister_mcode(mc, sz, (uint8_t *)mc + sizeof(MCLink));
317✔
303
    mcode_free(J, mc, sz);
317✔
304
    mc = next;
317✔
305
  }
306
}
516✔
307

308
/* -- MCode transactions -------------------------------------------------- */
309

310
/* Reserve the remainder of the current MCode area. */
311
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
2,294✔
312
{
313
  if (!J->mcarea)
2,294✔
314
    mcode_allocarea(J);
313✔
315
  else
316
    mcode_protect(J, MCPROT_GEN);
1,981✔
317
  *lim = J->mcbot;
2,294✔
318
  return J->mctop;
2,294✔
319
}
320

321
/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;  /* Record the new top of committed machine code. */
  mcode_protect(J, MCPROT_RUN);  /* Seal the area: executable, not writable. */
}
327

328
/* Abort the reservation. */
329
void lj_mcode_abort(jit_State *J)
1,524✔
330
{
331
  if (J->mcarea)
1,517✔
332
    mcode_protect(J, MCPROT_RUN);
1,502✔
333
}
1,517✔
334

335
/* Set/reset protection to allow patching of MCode areas.
** finish == 0: make the area containing ptr writable and return its base
** (the caller passes that base back as ptr when finishing).
** finish != 0: restore run protection on the area at ptr; returns NULL.
*/
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);  /* Pages are always writable. */
  return NULL;
#else
  if (finish) {
    /* Re-seal the area that was opened for patching. */
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);  /* Current area: use protection cache. */
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lj_assertJ(mc != NULL, "broken MCode area chain");
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}
368

369
/* Limit of MCode reservation reached.
** need is the number of bytes the failed reservation required.
** Either raises a fatal trace error (area/total limit exceeded) or
** allocates a fresh area and raises LJ_TRERR_MCODELM to trigger a retry.
** Never returns normally: lj_trace_err() does not return.
*/
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);  /* Drop the current reservation first. */
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;  /* Param is in KB. */
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Total mcode limit would be hit. */
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}
384

385
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc