• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 12317215997

13 Dec 2024 02:05PM UTC coverage: 92.948% (+0.02%) from 92.924%
12317215997

push

github

Buristan
cmake: fix build for Alpine

Since Alpine uses musl [1] as its C standard library, the build for it
failed after the commit af0f59da7 ("test:
fix LuaJIT-tests for old libc version"), since `GetLibCVersion()` raises
an error. This patch adds a check of the ID field in the
`/etc/os-release` file [2][3] of the Linux distribution and avoids
setting the glibc version if the distro is "alpine".

[1]: https://wiki.alpinelinux.org/wiki/Musl
[2]: https://www.linux.org/docs/man5/os-release.html
[3]: https://www.freedesktop.org/software/systemd/man/latest/os-release.html

5695 of 6033 branches covered (94.4%)

Branch coverage included in aggregate %.

21708 of 23449 relevant lines covered (92.58%)

2973826.86 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.42
/src/lj_trace.c
1
/*
2
** Trace management.
3
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
4
*/
5

6
#define lj_trace_c
7
#define LUA_CORE
8

9
#include "lj_obj.h"
10

11
#if LJ_HASJIT
12

13
#include "lj_gc.h"
14
#include "lj_err.h"
15
#include "lj_debug.h"
16
#include "lj_str.h"
17
#include "lj_frame.h"
18
#include "lj_state.h"
19
#include "lj_bc.h"
20
#include "lj_ir.h"
21
#include "lj_jit.h"
22
#include "lj_iropt.h"
23
#include "lj_mcode.h"
24
#include "lj_trace.h"
25
#include "lj_snap.h"
26
#include "lj_gdbjit.h"
27
#include "lj_record.h"
28
#include "lj_asm.h"
29
#include "lj_dispatch.h"
30
#include "lj_vm.h"
31
#include "lj_vmevent.h"
32
#include "lj_target.h"
33
#if LJ_HASMEMPROF
34
#include "lj_memprof.h"
35
#endif
36
#if LJ_HASSYSPROF
37
#include "lj_sysprof.h"
38
#endif
39

40
/* -- Error handling ------------------------------------------------------ */
41

42
/* Synchronous abort with error message. */
43
void lj_trace_err(jit_State *J, TraceError e)
{
  setnilV(&J->errinfo);  /* No error info. */
  setintV(J->L->top++, (int32_t)e);  /* Push error code for the handler. */
  lj_err_throw(J->L, LUA_ERRRUN);  /* Unwinds to the protected caller. */
}
49

50
/* Synchronous abort with error message and error info. */
51
void lj_trace_err_info(jit_State *J, TraceError e)
{
  /* NOTE: unlike lj_trace_err(), J->errinfo is left untouched here --
  ** the caller is expected to have filled it in beforehand.
  */
  setintV(J->L->top++, (int32_t)e);  /* Push error code for the handler. */
  lj_err_throw(J->L, LUA_ERRRUN);  /* Unwinds to the protected caller. */
}
56

57
/* -- Trace management ---------------------------------------------------- */
58

59
/* The current trace is first assembled in J->cur. The variable length
60
** arrays point to shared, growable buffers (J->irbuf etc.). When trace
61
** recording ends successfully, the current trace and its data structures
62
** are copied to a new (compact) GCtrace object.
63
*/
64

65
/* Find a free trace number. */
66
static TraceNo trace_findfree(jit_State *J)
{
  MSize osz, lim;
  if (J->freetrace == 0)
    J->freetrace = 1;  /* Slot 0 is reserved and never holds a trace. */
  /* Scan forward from the cached first-free hint. */
  for (; J->freetrace < J->sizetrace; J->freetrace++)
    if (traceref(J, J->freetrace) == NULL)
      return J->freetrace++;  /* Found a hole; advance hint past it. */
  /* Need to grow trace array. */
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
  osz = J->sizetrace;
  if (osz >= lim)
    return 0;  /* Too many traces. */
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
  /* Null out the newly added slots. */
  for (; osz < J->sizetrace; osz++)
    setgcrefnull(J->trace[osz]);
  return J->freetrace;  /* First slot of the grown region is free. */
}
85

86
/* Append a copy of J->cur's vector 'field' (length 'szfield', element type
** 'tp') at the cursor 'p' inside the compacted trace 'T', then advance 'p'.
** Expects 'T', 'J' and 'p' in scope at the expansion site (see trace_save).
*/
#define TRACE_APPENDVEC(field, szfield, tp) \
  T->field = (tp *)p; \
  memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
  p += J->cur.szfield*sizeof(tp);
90

91
#ifdef LUAJIT_USE_PERFTOOLS
92
/*
93
** Create symbol table of JIT-compiled code. For use with Linux perf tools.
94
** Example usage:
95
**   perf record -f -e cycles luajit test.lua
96
**   perf report -s symbol
97
**   rm perf.data /tmp/perf-*.map
98
*/
99
#include <stdio.h>
100
#include <unistd.h>
101

102
/* Append one symbol-table entry for trace T to /tmp/perf-<pid>.map.
** The map file is opened lazily on first use and kept open (static fp).
*/
static void perftools_addtrace(GCtrace *T)
{
  static FILE *fp;
  GCproto *pt = &gcref(T->startpt)->pt;
  const BCIns *startpc = mref(T->startpc, const BCIns);
  const char *name = proto_chunknamestr(pt);
  BCLine lineno;
  if (name[0] == '@' || name[0] == '=')
    name++;  /* Strip chunkname prefix marker. */
  else
    name = "(string)";
  lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
             "trace PC out of range");
  lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
  if (!fp) {
    char fname[40];
    /* Use a bounded, always-NUL-terminated format instead of sprintf().
    ** Cast pid_t to int so the argument matches the %d specifier.
    */
    snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", (int)getpid());
    if (!(fp = fopen(fname, "w"))) return;  /* Best-effort: silently skip. */
    setlinebuf(fp);  /* One line per trace, flushed promptly for perf. */
  }
  fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
          (long)T->mcode, T->szmcode, T->traceno, name, lineno);
}
125
#endif
126

127
/* Allocate space for copy of T. */
128
GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);  /* Header, 8-byte aligned. */
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);
  /* One allocation holds header + IR + snapshots + snapshot map. */
  size_t sz = sztr + szins +
	      T->nsnap*sizeof(SnapShot) +
	      T->nsnapmap*sizeof(SnapEntry);
  GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
  char *p = (char *)T2 + sztr;
  T2->gct = ~LJ_TTRACE;
  T2->marked = 0;
  T2->traceno = 0;
  T2->ir = (IRIns *)p - T->nk;  /* Biased pointer so IRRef indexes work. */
  T2->nins = T->nins;
  T2->nk = T->nk;
  T2->nsnap = T->nsnap;
  T2->nsnapmap = T->nsnapmap;
  memcpy(p, T->ir + T->nk, szins);  /* Only the IR is copied here. */
  L2J(L)->tracenum++;
  return T2;
}
149

150
/* Save current trace by copying and compacting it. */
151
static void trace_save(jit_State *J, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);  /* Header, 8-byte aligned. */
  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
  char *p = (char *)T + sztr;
  memcpy(T, &J->cur, sizeof(GCtrace));  /* Copy header over the prealloc. */
  setgcrefr(T->nextgc, J2G(J)->gc.root);  /* Link into the GC root list. */
  setgcrefp(J2G(J)->gc.root, T);
  newwhite(J2G(J), T);
  T->gct = ~LJ_TTRACE;
  T->ir = (IRIns *)p - J->cur.nk;  /* The IR has already been copied above. */
  p += szins;
  /* Compact snapshots and snapshot map right after the IR. */
  TRACE_APPENDVEC(snap, nsnap, SnapShot)
  TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
  J->cur.traceno = 0;
  J->curfinal = NULL;  /* Ownership transferred to the trace table. */
  setgcrefp(J->trace[T->traceno], T);
  lj_gc_barriertrace(J2G(J), T->traceno);
  lj_gdbjit_addtrace(J, T);
#ifdef LUAJIT_USE_PERFTOOLS
  perftools_addtrace(T);
#endif

  /* Add a new trace to the profiler. */
#if LJ_HASMEMPROF
  lj_memprof_add_trace(T);
#endif

#if LJ_HASSYSPROF
  lj_sysprof_add_trace(T);
#endif
}
183

184
void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
{
  jit_State *J = G2J(g);
  if (T->traceno) {  /* Registered trace (0 means never assigned a slot). */
    lj_gdbjit_deltrace(J, T);
    if (T->traceno < J->freetrace)
      J->freetrace = T->traceno;  /* Keep the first-free hint minimal. */
    setgcrefnull(J->trace[T->traceno]);
  }
  /* Size must mirror the single-block layout built by lj_trace_alloc(). */
  lj_mem_free(g, T,
    ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
    T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
  J->tracenum--;
}
198

199
/* Re-enable compiling a prototype by unpatching any modified bytecode. */
200
/* Re-enable compiling a prototype by unpatching any modified bytecode. */
void lj_trace_reenableproto(GCproto *pt)
{
  BCIns *bcbase;
  BCPos pos, nbc;
  if (!(pt->flags & PROTO_ILOOP))
    return;  /* Nothing was patched for this prototype. */
  bcbase = proto_bc(pt);
  nbc = pt->sizebc;
  pt->flags &= ~PROTO_ILOOP;
  /* Restore the function header opcode, if it was patched. */
  if (bc_op(bcbase[0]) == BC_IFUNCF)
    setbc_op(&bcbase[0], BC_FUNCF);
  /* Restore all patched loop opcodes to their hot-countable forms. */
  for (pos = 1; pos < nbc; pos++) {
    BCOp curop = bc_op(bcbase[pos]);
    if (curop == BC_IFORL || curop == BC_IITERL || curop == BC_ILOOP)
      setbc_op(&bcbase[pos], (int)curop+(int)BC_LOOP-(int)BC_ILOOP);
  }
}
215

216
/* Unpatch the bytecode modified by a root trace. */
217
static void trace_unpatch(jit_State *J, GCtrace *T)
{
  BCOp op = bc_op(T->startins);  /* Original (pre-patch) opcode. */
  BCIns *pc = mref(T->startpc, BCIns);
  UNUSED(J);
  if (op == BC_JMP)
    return;  /* No need to unpatch branches in parent traces (yet). */
  switch (bc_op(*pc)) {  /* Dispatch on the currently patched opcode. */
  case BC_JFORL:
    lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
    *pc = T->startins;  /* Restore original FORL. */
    pc += bc_j(T->startins);
    lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
    setbc_op(pc, BC_FORI);  /* Restore the matching FORI, too. */
    break;
  case BC_JITERL:
  case BC_JLOOP:
    lj_assertJ(op == BC_ITERL || op == BC_LOOP || bc_isret(op),
	       "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  case BC_JMP:
    lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op);
    pc += bc_j(*pc)+2;  /* Follow the jump to the patched JITERL. */
    if (bc_op(*pc) == BC_JITERL) {
      lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace");
      *pc = T->startins;
    }
    break;
  case BC_JFUNCF:
    lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  default:  /* Already unpatched. */
    break;
  }
}
254

255
/* Flush a root trace. */
256
static void trace_flushroot(jit_State *J, GCtrace *T)
{
  GCproto *pt = &gcref(T->startpt)->pt;
  lj_assertJ(T->root == 0, "not a root trace");
  lj_assertJ(pt != NULL, "trace has no prototype");
  /* First unpatch any modified bytecode. */
  trace_unpatch(J, T);
  /* Unlink root trace from chain anchored in prototype. */
  if (pt->trace == T->traceno) {  /* Trace is first in chain. Easy. */
    pt->trace = T->nextroot;
  } else if (pt->trace) {  /* Otherwise search in chain of root traces. */
    GCtrace *T2 = traceref(J, pt->trace);
    if (T2) {  /* Head may already be gone; then nothing to unlink. */
      for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
	if (T2->nextroot == T->traceno) {
	  T2->nextroot = T->nextroot;  /* Unlink from chain. */
	  break;
	}
    }
  }
}
277

278
/* Flush a trace. Only root traces are considered. */
279
/* Flush a trace. Only root traces are considered. */
void lj_trace_flush(jit_State *J, TraceNo traceno)
{
  GCtrace *T;
  if (traceno == 0 || traceno >= J->sizetrace)
    return;  /* Out-of-range trace numbers are silently ignored. */
  T = traceref(J, traceno);
  if (T != NULL && T->root == 0)
    trace_flushroot(J, T);  /* Side traces are not flushed directly. */
}
287

288
/* Flush all traces associated with a prototype. */
289
/* Flush all traces associated with a prototype. */
void lj_trace_flushproto(global_State *g, GCproto *pt)
{
  jit_State *J = G2J(g);
  /* Each flush unlinks the head of the prototype's root-trace chain,
  ** so keep flushing until the chain is empty.
  */
  while (pt->trace != 0)
    trace_flushroot(J, traceref(J, pt->trace));
}
294

295
/* Flush all traces. */
296
int lj_trace_flushall(lua_State *L)
{
  jit_State *J = L2J(L);
  ptrdiff_t i;
  if ((J2G(J)->hookmask & HOOK_GC))
    return 1;  /* Refuse to flush while inside a GC hook. */
  /* Iterate backwards so side traces are dropped before their roots. */
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
    GCtrace *T = traceref(J, i);
    if (T) {
      if (T->root == 0)
	trace_flushroot(J, T);
      lj_gdbjit_deltrace(J, T);
      T->traceno = T->link = 0;  /* Blacklist the link for cont_stitch. */
      setgcrefnull(J->trace[i]);
    }
  }
  J->cur.traceno = 0;
  J->freetrace = 0;
  /* Clear penalty cache. */
  memset(J->penalty, 0, sizeof(J->penalty));
  /* Free the whole machine code and invalidate all exit stub groups. */
  lj_mcode_free(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
  );
  return 0;
}
324

325
/* Initialize JIT compiler state. */
326
void lj_trace_initstate(global_State *g)
{
  jit_State *J = G2J(g);
  TValue *tv;

  /* Initialize aligned SIMD constants (sign-bit masks for abs/neg). */
  tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
  tv[0].u64 = U64x(7fffffff,ffffffff);
  tv[1].u64 = U64x(7fffffff,ffffffff);
  tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
  tv[0].u64 = U64x(80000000,00000000);
  tv[1].u64 = U64x(80000000,00000000);

  /* Initialize 32/64 bit constants (per-target FP conversion constants). */
#if LJ_TARGET_X86ORX64
  J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);
#if LJ_32
  J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);
#endif
  J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);
  J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
#endif
#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
  J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);
#endif
#if LJ_TARGET_PPC
  J->k32[LJ_K32_2P52_2P31] = 0x59800004;
  J->k32[LJ_K32_2P52] = 0x59800000;
#endif
#if LJ_TARGET_PPC || LJ_TARGET_MIPS
  J->k32[LJ_K32_2P31] = 0x4f000000;
#endif
#if LJ_TARGET_MIPS
  J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);
#if LJ_64
  J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);
  J->k32[LJ_K32_2P63] = 0x5f000000;
  J->k32[LJ_K32_M2P64] = 0xdf800000;
#endif
#endif
}
367

368
/* Free everything associated with the JIT compiler state. */
369
void lj_trace_freestate(global_State *g)
{
  jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
  {  /* This assumes all traces have already been freed. */
    ptrdiff_t i;
    for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
      lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
		 "trace still allocated");
  }
#endif
  lj_mcode_free(J);
  /* Release the shared growable buffers and the trace table itself. */
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
  lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
}
386

387
/* -- Penalties and blacklisting ------------------------------------------ */
388

389
/* Blacklist a bytecode instruction. */
390
static void blacklist_pc(GCproto *pt, BCIns *pc)
{
  /* Switch the opcode to its interpreter-only I* variant (see
  ** lj_trace_reenableproto() for the inverse transformation).
  */
  setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
  pt->flags |= PROTO_ILOOP;
}
395

396
/* Penalize a bytecode instruction. */
397
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
{
  uint32_t i, val = PENALTY_MIN;
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {  /* Cache slot found? */
      /* First try to bump its hotcount several times. */
      val = ((uint32_t)J->penalty[i].val << 1) +
	    LJ_PRNG_BITS(J, PENALTY_RNDBITS);  /* Randomize to avoid cycles. */
      if (val > PENALTY_MAX) {
	blacklist_pc(pt, pc);  /* Blacklist it, if that didn't help. */
	return;
      }
      goto setpenalty;
    }
  /* Assign a new penalty cache slot (round-robin eviction). */
  i = J->penaltyslot;
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
  setmref(J->penalty[i].pc, pc);
setpenalty:
  J->penalty[i].val = (uint16_t)val;
  J->penalty[i].reason = e;
  hotcount_set(J2GG(J), pc+1, val);  /* Delay the next recording attempt. */
}
420

421
/* -- Trace compiler state machine ---------------------------------------- */
422

423
/* Start tracing. */
424
static void trace_start(jit_State *J)
{
  lua_State *L;
  TraceNo traceno;

  if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
    if (J->parent == 0 && J->exitno == 0) {
      /* Lazy bytecode patching to disable hotcount events. */
      lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
		 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
		 "bad hot bytecode %d", bc_op(*J->pc));
      setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
      J->pt->flags |= PROTO_ILOOP;
    }
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Get a new trace number. */
  traceno = trace_findfree(J);
  if (LJ_UNLIKELY(traceno == 0)) {  /* No free trace? */
    lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
	       "recorder called from GC hook");
    lj_trace_flushall(J->L);
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }
  /* Reserve the slot with the in-progress trace itself. */
  setgcrefp(J->trace[traceno], &J->cur);

  /* Setup enough of the current trace to be able to send the vmevent. */
  memset(&J->cur, 0, sizeof(GCtrace));
  J->cur.traceno = traceno;
  J->cur.nins = J->cur.nk = REF_BASE;
  J->cur.ir = J->irbuf;
  J->cur.snap = J->snapbuf;
  J->cur.snapmap = J->snapmapbuf;
  J->mergesnap = 0;
  J->needsnap = 0;
  J->bcskip = 0;
  J->guardemit.irt = 0;
  J->postproc = LJ_POST_NONE;
  lj_resetsplit(J);
  J->retryrec = 0;
  J->ktrace = 0;
  setgcref(J->cur.startpt, obj2gco(J->pt));

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
    setintV(L->top++, proto_bcpos(J->pt, J->pc));
    if (J->parent) {
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
    } else {
      BCOp op = bc_op(*J->pc);
      if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
	setintV(L->top++, J->exitno);  /* Parent of stitched trace. */
	setintV(L->top++, -1);
      }
    }
  );
  lj_record_setup(J);
}
489

490
/* Stop tracing. */
491
static void trace_stop(jit_State *J)
{
  BCIns *pc = mref(J->cur.startpc, BCIns);
  BCOp op = bc_op(J->cur.startins);
  GCproto *pt = &gcref(J->cur.startpt)->pt;
  TraceNo traceno = J->cur.traceno;
  GCtrace *T = J->curfinal;
  lua_State *L;

  switch (op) {  /* Dispatch on the opcode the trace started at. */
  case BC_FORL:
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
    /* fallthrough */
  case BC_LOOP:
  case BC_ITERL:
  case BC_FUNCF:
    /* Patch bytecode of starting instruction in root trace. */
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
    setbc_d(pc, traceno);
  addroot:
    /* Add to root trace chain in prototype. */
    J->cur.nextroot = pt->trace;
    pt->trace = (TraceNo1)traceno;
    break;
  case BC_RET:
  case BC_RET0:
  case BC_RET1:
    /* Down-recursion root: replace the return with a JLOOP dispatch. */
    *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
    goto addroot;
  case BC_JMP:
    /* Patch exit branch in parent to side trace entry. */
    lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
    lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
    {
      SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
      snap->count = SNAPCOUNT_DONE;
      if (J->cur.topslot > snap->topslot) snap->topslot = J->cur.topslot;
    }
    /* Add to side trace chain in root trace. */
    {
      GCtrace *root = traceref(J, J->cur.root);
      root->nchild++;
      J->cur.nextside = root->nextside;
      root->nextside = (TraceNo1)traceno;
    }
    break;
  case BC_CALLM:
  case BC_CALL:
  case BC_ITERC:
    /* Trace stitching: patch link of previous trace. */
    traceref(J, J->exitno)->link = traceno;
    break;
  default:
    lj_assertJ(0, "bad stop bytecode %d", op);
    break;
  }

  /* Commit new mcode only after all patching is done. */
  lj_mcode_commit(J, J->cur.mcode);
  J->postproc = LJ_POST_NONE;
  trace_save(J, T);

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
  );
}
561

562
/* Start a new root trace for down-recursion. */
563
static int trace_downrec(jit_State *J)
{
  /* Restart recording at the return instruction. */
  lj_assertJ(J->pt != NULL, "no active prototype");
  lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
  if (bc_op(*J->pc) == BC_RETM) {
    J->ntraceabort++;
    return 0;  /* NYI: down-recursion with RETM. */
  }
  J->parent = 0;  /* Down-recursion starts a fresh root trace. */
  J->exitno = 0;
  J->state = LJ_TRACE_RECORD;
  trace_start(J);
  return 1;  /* Tell trace_abort() to retry the state machine. */
}
578

579
/* Abort tracing. */
580
static int trace_abort(jit_State *J)
{
  lua_State *L = J->L;
  TraceError e = LJ_TRERR_RECERR;  /* Default if no numeric error on stack. */
  TraceNo traceno;

  J->postproc = LJ_POST_NONE;
  lj_mcode_abort(J);
  if (J->curfinal) {  /* Drop the partially assembled final trace, if any. */
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL;
  }
  if (tvisnumber(L->top-1))
    e = (TraceError)numberVint(L->top-1);
  if (e == LJ_TRERR_MCODELM) {
    L->top--;  /* Remove error object */
    J->state = LJ_TRACE_ASM;
    return 1;  /* Retry ASM with new MCode area. */
  }
  /* Penalize or blacklist starting bytecode instruction. */
  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
    if (J->exitno == 0) {
      BCIns *startpc = mref(J->cur.startpc, BCIns);
      if (e == LJ_TRERR_RETRY)
	hotcount_set(J2GG(J), startpc+1, 1);  /* Immediate retry. */
      else
	penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
    } else {
      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
    }
  }

  /* Is there anything to abort? */
  traceno = J->cur.traceno;
  if (traceno) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    J->cur.link = 0;
    J->cur.linktype = LJ_TRLINK_NONE;
    lj_vmevent_send(L, TRACE,
      TValue *frame;
      const BCIns *pc;
      GCfunc *fn;
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
      setintV(L->top++, traceno);
      /* Find original Lua function call to generate a better error message. */
      frame = J->L->base-1;
      pc = J->pc;
      while (!isluafunc(frame_func(frame))) {
	pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
	frame = frame_prev(frame);
      }
      fn = frame_func(frame);
      setfuncV(L, L->top++, fn);
      setintV(L->top++, proto_bcpos(funcproto(fn), pc));
      copyTV(L, L->top++, restorestack(L, errobj));
      copyTV(L, L->top++, &J->errinfo);
    );
    /* Drop aborted trace after the vmevent (which may still access it). */
    setgcrefnull(J->trace[traceno]);
    if (traceno < J->freetrace)
      J->freetrace = traceno;  /* Keep the first-free hint minimal. */
    J->cur.traceno = 0;
  }
  L->top--;  /* Remove error object */
  if (e == LJ_TRERR_DOWNREC)
    return trace_downrec(J);
  else if (e == LJ_TRERR_MCODEAL)
    lj_trace_flushall(L);
  J->ntraceabort++;
  return 0;
}
651

652
/* Perform pending re-patch of a bytecode instruction. */
653
/* Perform pending re-patch of a bytecode instruction. */
static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
{
  if (LJ_UNLIKELY(J->patchpc)) {
    /* When not forced and a bytecode skip is pending, consume the skip
    ** instead of applying the patch; otherwise apply it and clear it.
    */
    if (!force && J->bcskip != 0) {
      J->bcskip = 0;
    } else {
      *J->patchpc = J->patchins;
      J->patchpc = NULL;
    }
  }
}
664

665
/* State machine for the trace compiler. Protected callback. */
666
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
  jit_State *J = (jit_State *)ud;
  UNUSED(dummy);
  do {
  retry:
    switch (J->state) {
    case LJ_TRACE_START:
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
      trace_start(J);
      lj_dispatch_update(J2G(J));
      break;

    case LJ_TRACE_RECORD:
      trace_pendpatch(J, 0);
      setvmstate(J2G(J), RECORD);
      lj_vmevent_send_(L, RECORD,
	/* Save/restore state for trace recorder. */
	TValue savetv = J2G(J)->tmptv;
	TValue savetv2 = J2G(J)->tmptv2;
	TraceNo parent = J->parent;
	ExitNo exitno = J->exitno;
	setintV(L->top++, J->cur.traceno);
	setfuncV(L, L->top++, J->fn);
	setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
	setintV(L->top++, J->framedepth);
      ,
	J2G(J)->tmptv = savetv;
	J2G(J)->tmptv2 = savetv2;
	J->parent = parent;
	J->exitno = exitno;
      );
      lj_record_ins(J);  /* Record one bytecode instruction. */
      break;

    case LJ_TRACE_END:
      trace_pendpatch(J, 1);
      J->loopref = 0;
      if ((J->flags & JIT_F_OPT_LOOP) &&
	  J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
	setvmstate(J2G(J), OPT);
	lj_opt_dce(J);
	if (lj_opt_loop(J)) {  /* Loop optimization failed? */
	  J->cur.link = 0;
	  J->cur.linktype = LJ_TRLINK_NONE;
	  J->loopref = J->cur.nins;
	  J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
	  break;
	}
	J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
      }
      lj_opt_split(J);
      lj_opt_sink(J);
      if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
      J->state = LJ_TRACE_ASM;
      break;

    case LJ_TRACE_ASM:
      setvmstate(J2G(J), ASM);
      lj_asm_trace(J, &J->cur);
      trace_stop(J);
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;

    default:  /* Trace aborted asynchronously. */
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
      /* fallthrough */
    case LJ_TRACE_ERR:
      trace_pendpatch(J, 1);
      if (trace_abort(J))
	goto retry;  /* Abort may request a retry (MCODELM/DOWNREC). */
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;
    }
  } while (J->state > LJ_TRACE_RECORD);
  return NULL;
}
747

748
/* -- Event handling ------------------------------------------------------ */
749

750
/* A bytecode instruction is about to be executed. Record it. */
751
void lj_trace_ins(jit_State *J, const BCIns *pc)
{
  /* Note: J->L must already be set. pc is the true bytecode PC here. */
  J->pc = pc;
  J->fn = curr_func(J->L);
  J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
  /* Run the trace-compiler state machine under a protected call; on an
  ** error throw, switch to the error state and re-enter until it settles.
  */
  while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
    J->state = LJ_TRACE_ERR;
}
760

761
/* A hotcount triggered. Start recording a root trace. */
762
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
{
  /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
  ERRNO_SAVE
  /* Reset hotcount. */
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Root trace. */
    J->exitno = 0;
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc-1);  /* Compensate for the interpreter PC offset. */
  }
  ERRNO_RESTORE
}
778

779
/* Check for a hot side exit. If yes, start recording a side trace. */
780
static void trace_hotside(jit_State *J, const BCIns *pc)
{
  SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
  /* NOTE: the order matters -- ++snap->count must only happen after the
  ** cheaper guards have passed (short-circuit evaluation).
  */
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
      isluafunc(curr_func(J->L)) &&
      snap->count != SNAPCOUNT_DONE &&
      ++snap->count >= J->param[JIT_P_hotexit]) {
    lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
    /* J->parent is non-zero for a side trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}
793

794
/* Stitch a new trace to the previous trace. */
795
void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
{
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Have to treat it like a root trace. */
    /* J->exitno is set to the invoking trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}
806

807

808
/* Tiny struct to pass data to protected call. */
809
typedef struct ExitDataCP {
810
  jit_State *J;
811
  void *exptr;                /* Pointer to exit state. */
812
  const BCIns *pc;        /* Restart interpreter at this PC. */
813
} ExitDataCP;
814

815
/* Need to protect lj_snap_restore because it may throw. */
816
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
  ExitDataCP *exd = (ExitDataCP *)ud;
  /* Always catch error here and don't call error function. */
  cframe_errfunc(L->cframe) = 0;
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
  /* Restore interpreter state from the exit snapshot; may throw. */
  exd->pc = lj_snap_restore(exd->J, exd->exptr);
  UNUSED(dummy);
  return NULL;
}
826

827
#ifndef LUAJIT_DISABLE_VMEVENT
828
/* Push all registers from exit state. */
829
static void trace_exit_regs(lua_State *L, ExitState *ex)
{
  int32_t i;
  setintV(L->top++, RID_NUM_GPR);
  setintV(L->top++, RID_NUM_FPR);
  for (i = 0; i < RID_NUM_GPR; i++) {
    /* 32 bit GPRs fit in an integer TValue; wider ones go out as numbers. */
    if (sizeof(ex->gpr[i]) == sizeof(int32_t))
      setintV(L->top++, (int32_t)ex->gpr[i]);
    else
      setnumV(L->top++, (lua_Number)ex->gpr[i]);
  }
#if !LJ_SOFTFP
  for (i = 0; i < RID_NUM_FPR; i++) {
    setnumV(L->top, ex->fpr[i]);
    if (LJ_UNLIKELY(tvisnan(L->top)))
      setnanV(L->top);  /* Canonicalize NaNs before exposing them. */
    L->top++;
  }
#endif
}
849
#endif
850

851
#if defined(EXITSTATE_PCREG) || (LJ_UNWIND_JIT && !EXITTRACE_VMSTATE)
/* Determine trace number from pc of exit instruction. */
static TraceNo trace_exit_find(jit_State *J, MCode *pc)
{
  TraceNo tno = 1;
  /* Linear scan over all allocated traces for the mcode range holding pc. */
  while (tno < J->sizetrace) {
    GCtrace *t = traceref(J, tno);
    if (t != NULL &&
	pc >= t->mcode && pc < (MCode *)((char *)t->mcode + t->szmcode))
      return tno;
    tno++;
  }
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif
865

866
/* A trace exited. Restore interpreter state.
** Returns the negated error code on error, otherwise MULTRES or 0
** (see the final switch below).
*/
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
  ERRNO_SAVE  /* Preserve errno across the whole exit handling. */
  lua_State *L = J->L;
  ExitState *ex = (ExitState *)exptr;
  ExitDataCP exd;
  int errcode, exitcode = J->exitcode;
  TValue exiterr;
  const BCIns *pc;
  void *cf;
  GCtrace *T;

  setnilV(&exiterr);
  if (exitcode) {  /* Trace unwound with error code. */
    J->exitcode = 0;
    /* Save the error object from the stack top before snapshot restore. */
    copyTV(L, &exiterr, L->top-1);
  }

#ifdef EXITSTATE_PCREG
  /* No trace number from the exit stub: recover it from the exit pc. */
  J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
#endif
  T = traceref(J, J->parent); UNUSED(T);
#ifdef EXITSTATE_CHECKEXIT
  if (J->exitno == T->nsnap) {  /* Treat stack check like a parent exit. */
    lj_assertJ(T->root != 0, "stack check in root trace");
    J->exitno = T->ir[REF_BASE].op2;
    J->parent = T->ir[REF_BASE].op1;
    T = traceref(J, J->parent);
  }
#endif
  lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
  exd.J = J;
  exd.exptr = exptr;
  /* Protected call: lj_snap_restore may throw (e.g. OOM). */
  errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
  if (errcode)
    return -errcode;  /* Return negated error code. */

  if (exitcode) copyTV(L, L->top++, &exiterr);  /* Anchor the error object. */

  /* Suppress the TEXIT vmevent while the profiler hook is pending. */
  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
    lj_vmevent_send(L, TEXIT,
      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
      trace_exit_regs(L, ex);
    );

  /* Point the C frame at the restart PC computed by the snapshot restore. */
  pc = exd.pc;
  cf = cframe_raw(L->cframe);
  setcframe_pc(cf, pc);
  if (exitcode) {
    return -exitcode;  /* Rethrow the saved error to the unwinder. */
  } else if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
    /* Just exit to interpreter. */
  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
    if (!(G(L)->hookmask & HOOK_GC))
      lj_gc_step(L);  /* Exited because of GC: drive GC forward. */
  } else if ((J->flags & JIT_F_ON)) {
    trace_hotside(J, pc);  /* Count the exit; may start a side trace. */
  }
  /* Handle the case where the restart PC lands on a patched JLOOP that
  ** replaced a return instruction.
  */
  if (bc_op(*pc) == BC_JLOOP) {
    BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
    if (bc_isret(bc_op(*retpc))) {
      if (J->state == LJ_TRACE_RECORD) {  /* Undo JLOOP while recording. */
	J->patchins = *pc;
	J->patchpc = (BCIns *)pc;
	*J->patchpc = *retpc;
	J->bcskip = 1;
      } else {  /* Otherwise restart at the original return instruction. */
	pc = retpc;
	setcframe_pc(cf, pc);
      }
    }
  }
  /* Return MULTRES or 0. */
  ERRNO_RESTORE
  switch (bc_op(*pc)) {
  case BC_CALLM: case BC_CALLMT:
    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
  case BC_RETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
  case BC_TSETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
  default:
    if (bc_op(*pc) >= BC_FUNCF)  /* Restarting at a function header. */
      return (int)((BCReg)(L->top - L->base) + 1);
    return 0;
  }
}
956

957
#if LJ_UNWIND_JIT
958
/* Given an mcode address determine trace exit address for unwinding. */
959
uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep)
4✔
960
{
961
#if EXITTRACE_VMSTATE
962
  TraceNo traceno = J2G(J)->vmstate;
4✔
963
#else
964
  TraceNo traceno = trace_exit_find(J, (MCode *)addr);
965
#endif
966
  GCtrace *T = traceref(J, traceno);
4✔
967
  if (T
4✔
968
#if EXITTRACE_VMSTATE
969
      && addr >= (uintptr_t)T->mcode && addr < (uintptr_t)T->mcode + T->szmcode
4✔
970
#endif
971
     ) {
972
    SnapShot *snap = T->snap;
4✔
973
    SnapNo lo = 0, exitno = T->nsnap;
4✔
974
    uintptr_t ofs = (uintptr_t)((MCode *)addr - T->mcode);  /* MCode units! */
4✔
975
    /* Rightmost binary search for mcode offset to determine exit number. */
976
    do {
16✔
977
      SnapNo mid = (lo+exitno) >> 1;
16✔
978
      if (ofs < snap[mid].mcofs) exitno = mid; else lo = mid + 1;
16✔
979
    } while (lo < exitno);
16✔
980
    exitno--;
4✔
981
    *ep = exitno;
4✔
982
#ifdef EXITSTUBS_PER_GROUP
983
    return (uintptr_t)exitstub_addr(J, exitno);
4✔
984
#else
985
    return (uintptr_t)exitstub_trace_addr(T, exitno);
986
#endif
987
  }
988
  lj_assertJ(0, "bad exit pc");
989
  return 0;
990
}
991
#endif
992

993
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc