
tarantool / luajit / 12436052388

20 Dec 2024 05:48PM UTC. Coverage: 92.968% (-0.01%) from 92.982%.

Build 12436052388 (push, via GitHub)

Buristan: "RISCV64: Add FFI and JIT support"
Contributed by infiWang.

5692 of 6032 branches covered (94.36%). Branch coverage is included in the aggregate %.
21716 of 23449 relevant lines covered (92.61%).
2975146.02 hits per line.

Source File: /src/lj_mcode.c (95.59% covered)
/*
** Machine code management.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif

#if LJ_TARGET_RISCV64 && LJ_TARGET_LINUX
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/cachectl.h>
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif LJ_TARGET_RISCV64 && LJ_TARGET_LINUX
#if (defined(__GNUC__) || defined(__clang__))
  __asm__ volatile("fence rw, rw");
#else
  lj_vm_fence_rw_rw();
#endif
#ifdef __GLIBC__
  __riscv_flush_icache(start, end, 0);
#else
  syscall(__NR_riscv_flush_icache, start, end, 0UL);
#endif
#elif defined(__GNUC__) || defined(__clang__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
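/* On RISC-V/Linux the branch above first forces a full "fence rw, rw" to
** order the freshly written code bytes, then flushes the instruction cache
** via glibc's __riscv_flush_icache() or, on non-glibc systems, the raw
** riscv_flush_icache syscall.
*/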

#endif

#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW   PAGE_READWRITE
#define MCPROT_RX   PAGE_EXECUTE_READ
#define MCPROT_RWX  PAGE_EXECUTE_READWRITE

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = LJ_WIN_VALLOC((void *)hint, sz,
                          MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS  MAP_ANON
#endif

#define MCPROT_RW   (PROT_READ|PROT_WRITE)
#define MCPROT_RX   (PROT_READ|PROT_EXEC)
#define MCPROT_RWX  (PROT_READ|PROT_WRITE|PROT_EXEC)

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}
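/* Note that when a placement hint is given, a failed mmap() returns NULL
** instead of raising a trace error, so mcode_alloc() below can keep
** probing other candidate addresses.
*/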

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}

static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW   0
#define MCPROT_RX   0
#define MCPROT_RWX  0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN  MCPROT_RWX
#define MCPROT_RUN  MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN  MCPROT_RW
#define MCPROT_RUN  MCPROT_RX

/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NORET LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
  exit(EXIT_FAILURE);
}

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}
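/* J->mcprot caches the protection mode of the current area, so repeated
** requests for the same mode skip the underlying mprotect()/VirtualProtect()
** call entirely.
*/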

#endif

/* -- MCode area allocation ----------------------------------------------- */

#if LJ_64
#define mcode_validptr(p)  (p)
#else
#define mcode_validptr(p)  ((p) && (uintptr_t)(p) < 0xffff0000)
#endif

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
                      ~(uintptr_t)0x0fffffffu) + 0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  /* Limit probing iterations, depending on the available pool size. */
  for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p) &&
          ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
        return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing 64K-aligned pseudo-random addresses. */
    do {
      hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16;
    } while (!(hint + sz < range+range));
    hint = target + hint - range;
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
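/* Worked example: on x64, where rel32 branches give a +-2GB jump range
** (LJ_TARGET_JUMPRANGE is assumed to be 31 here), range works out to
** 2^30 - 2^21 bytes, i.e. just under 1GB to either side of the target,
** so any two areas placed inside the window remain mutually reachable
** with a single rel32 jump.
*/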

#else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#if defined(__OpenBSD__) || LJ_TARGET_UWP
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

#endif

/* -- MCode area management ----------------------------------------------- */

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
  J->mcbot = (MCode *)lj_err_register_mcode(J->mcarea, sz, (uint8_t *)J->mcbot);
}
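/* Each area thus begins with an MCLink header (next pointer plus size)
** that chains every allocated area together; lj_mcode_free() below walks
** this chain to release them all.
*/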

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    size_t sz = ((MCLink *)mc)->size;
    lj_err_deregister_mcode(mc, sz, (uint8_t *)mc + sizeof(MCLink));
    mcode_free(J, mc, sz);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}
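/* A typical code-generation transaction, sketched (the emit step is
** illustrative only):
**
**   MCode *lim;
**   MCode *p = lj_mcode_reserve(J, &lim);  -- area is now writable
**   ...emit machine code downwards from p, staying above lim...
**   lj_mcode_commit(J, p);                 -- area is executable again
**
** If assembly fails, lj_mcode_abort() below re-protects the area without
** moving mctop.
*/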

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lj_assertJ(mc != NULL, "broken MCode area chain");
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}
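/* Patching protocol, sketched: a finish = 0 call makes the area holding
** ptr writable and returns its start, which is later passed back with
** finish = 1 to restore execute protection:
**
**   MCode *area = lj_mcode_patch(J, p, 0);
**   ...patch instructions at p...
**   lj_mcode_patch(J, area, 1);
*/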

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}

#endif