
tarantool / luajit / 5942561862

22 Aug 2023 05:18PM UTC coverage: 87.9% (+0.08%) from 87.818%
push · github · igormunkin
MIPS: Add MIPS64 R6 port.

Contributed by Hua Zhang, YunQiang Su from Wave Computing,
and Radovan Birdic from RT-RK.
Sponsored by Wave Computing.

(cherry-picked from commit 94d0b5300)

This patch adds support for MIPS Release 6 [1] for the 64-bit build.
This includes:
* A global `_map_def` value is set within <dynasm/dynasm.lua>; its `MIPSR6`
  key indicates support for the corresponding instruction set. `MIPSR6` is
  also defined in `DYNASM_FLAGS` (`DASM_AFLAGS`).
* New instructions are added to <dynasm/dasm_mips.lua>; they are used when
  the aforementioned key is set.
* Obsolete instructions (no longer available in R6) are used in the
  opposite case (when `MIPSR6` isn't set).
* New opcode maps are added to <src/jit/dis_mips.lua>.
* The `map_arch` table in <jit/bcsave.lua> is refactored for more
  convenient usage. Each arch key now maps to a table describing the
  supported architecture (see the sketch after this list):
    - `e`: endianness ("le" or "be")
    - `b`: bit width of the supported architecture (32 or 64)
    - `m`: machine specification (see `e_machine` in man elf)
    - `f`: processor-specific flags (see `e_flags` in man elf)
    - `p`: number identifying the type of target machine [2] for the
      Portable Executable format [3]
* A new `LJ_TARGET_MIPSR6` define is set for MIPSR6 in <src/lj_arch.h>.
* The corresponding "MIPS32R6" and "MIPS64R6" CPU strings are added to
  <src/jit.h>.
* MIPSR6 instructions are added to <src/lj_target_mips.h>; some obsolete
  instructions are removed or defined only for the non-MIPSR6 build.
* All release-dependent instructions in <src/lj_asm_mips.h> are
  instrumented with the `LJ_TARGET_MIPSR6` macro.
* The `f20`, `f21`, `f22` FP registers are defined as `FTMP0`, `FTMP1`,
  `FTMP2` respectively in the VM.
* All release-dependent instructions in <src/vm_mips64.dasm> are
  instrumented with the `MIPSR6` macro.
* The `sfmin_max` macro now takes a third operand for the MIPSR6 build.
* Fix the impli... (continued)
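
For illustration, a refactored `map_arch` entry might look like the sketch
below (a minimal sketch: the `x64` values are the standard ELF/PE constants,
while the MIPS `f` and `p` values are placeholders, not taken from the patch):

  local map_arch = {
    x64      = { e = "le", b = 64, m = 62, f = 0, p = 0x8664 },  -- EM_X86_64 / IMAGE_FILE_MACHINE_AMD64
    mips64r6 = { e = "be", b = 64, m = 8, f = 0xa0000400, p = 0x366 },  -- EM_MIPS; f and p illustrative
  }

Each consumer can then fetch all target properties with a single lookup,
e.g. `local info = map_arch[arch]`.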

5343 of 6000 branches covered (89.05%)

Branch coverage included in aggregate %.

20504 of 23405 relevant lines covered (87.61%)

1253254.78 hits per line

Source File: /src/lj_mcode.c (88.32% covered)
/*
** Machine code management.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
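  /* x86/x64 CPUs keep the instruction cache coherent with stores,
  ** so no explicit flush is needed.
  */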
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}

#endif

#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW        PAGE_READWRITE
#define MCPROT_RX        PAGE_EXECUTE_READ
#define MCPROT_RWX        PAGE_EXECUTE_READWRITE

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = LJ_WIN_VALLOC((void *)hint, sz,
                          MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS        MAP_ANON
#endif

#define MCPROT_RW        (PROT_READ|PROT_WRITE)
#define MCPROT_RX        (PROT_READ|PROT_EXEC)
#define MCPROT_RWX        (PROT_READ|PROT_WRITE|PROT_EXEC)

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}

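/* Returns 0 on success, non-zero on failure (same convention as the
** Windows variant above).
*/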
static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW        0
#define MCPROT_RX        0
#define MCPROT_RWX        0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN        MCPROT_RWX
#define MCPROT_RUN        MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN        MCPROT_RW
#define MCPROT_RUN        MCPROT_RX

/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
}

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}

#endif

/* -- MCode area allocation ----------------------------------------------- */

#if LJ_64
#define mcode_validptr(p)        (p)
#else
#define mcode_validptr(p)        ((p) && (uintptr_t)(p) < 0xffff0000)
#endif

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
                      ~(uintptr_t)0x0fffffffu) + 0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  /* Limit probing iterations, depending on the available pool size. */
  for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

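      /* Unsigned wraparound turns each comparison below into a one-sided
      ** range check around target.
      */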
      if (mcode_validptr(p) &&
          ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
        return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing 64K-aligned pseudo-random addresses. */
    do {
      hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16;
    } while (!(hint + sz < range+range));
    hint = target + hint - range;
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}

#else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#if defined(__OpenBSD__) || LJ_TARGET_UWP
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
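  /* Map as RX up front, then flip to RW for code generation; discard the
  ** area if the flip fails.
  */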
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

#endif

/* -- MCode area management ----------------------------------------------- */

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
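  /* Each area is prefixed by an MCLink header, chaining all areas
  ** into a list.
  */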
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
  J->mcbot = (MCode *)lj_err_register_mcode(J->mcarea, sz, (uint8_t *)J->mcbot);
}

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    size_t sz = ((MCLink *)mc)->size;
    lj_err_deregister_mcode(mc, sz, (uint8_t *)mc + sizeof(MCLink));
    mcode_free(J, mc, sz);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */
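
/* Machine code is emitted downwards, from mctop towards mcbot. A typical
** transaction looks roughly like this (a sketch, not verbatim assembler
** code):
**
**   MCode *lim;
**   MCode *top = lj_mcode_reserve(J, &lim);   Make the area writable.
**   ... emit instructions downwards from top, staying above lim ...
**   lj_mcode_commit(J, top);                  Publish and re-protect.
**
** lj_mcode_abort(J) restores protection without moving mctop.
*/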

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}

#endif