• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 11382361254

17 Oct 2024 09:16AM UTC coverage: 93.013% (+0.04%) from 92.974%
11382361254

push

github

Buristan
Limit CSE for IR_CARG to fix loop optimizations.

Thanks to Peter Cawley.

(cherry picked from commit 3bdc6498c)

`IR_CALLXS` for a vararg function contains `IR_CARG(fptr, ctid)` as
its second operand. The `loop_emit_phi()` function scans only the first
operand of the IR, so the second is not marked as a PHI. In this case,
when the IR appears in both the invariant and variant parts of the loop,
CSE may remove it and thus lead to incorrect code emission.

This patch tweaks the CSE rules to avoid CSE across the `IR_LOOP`.

Sergey Kaplun:
* added the description and the test for the problem

Part of tarantool/tarantool#10199

Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Reviewed-by: Maxim Kokryashkin <m.kokryashkin@tarantool.org>
Signed-off-by: Sergey Kaplun <skaplun@tarantool.org>

5697 of 6033 branches covered (94.43%)

Branch coverage included in aggregate %.

4 of 4 new or added lines in 1 file covered. (100.0%)

3 existing lines in 3 files now uncovered.

21725 of 23449 relevant lines covered (92.65%)

2971813.46 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.42
/src/lj_trace.c
1
/*
2
** Trace management.
3
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
4
*/
5

6
#define lj_trace_c
7
#define LUA_CORE
8

9
#include "lj_obj.h"
10

11
#if LJ_HASJIT
12

13
#include "lj_gc.h"
14
#include "lj_err.h"
15
#include "lj_debug.h"
16
#include "lj_str.h"
17
#include "lj_frame.h"
18
#include "lj_state.h"
19
#include "lj_bc.h"
20
#include "lj_ir.h"
21
#include "lj_jit.h"
22
#include "lj_iropt.h"
23
#include "lj_mcode.h"
24
#include "lj_trace.h"
25
#include "lj_snap.h"
26
#include "lj_gdbjit.h"
27
#include "lj_record.h"
28
#include "lj_asm.h"
29
#include "lj_dispatch.h"
30
#include "lj_vm.h"
31
#include "lj_vmevent.h"
32
#include "lj_target.h"
33
#if LJ_HASMEMPROF
34
#include "lj_memprof.h"
35
#endif
36
#if LJ_HASSYSPROF
37
#include "lj_sysprof.h"
38
#endif
39

40
/* -- Error handling ------------------------------------------------------ */
41

42
/* Synchronous abort with error message. */
43
void lj_trace_err(jit_State *J, TraceError e)
3,808✔
44
{
45
  setnilV(&J->errinfo);  /* No error info. */
3,808✔
46
  setintV(J->L->top++, (int32_t)e);
3,808✔
47
  lj_err_throw(J->L, LUA_ERRRUN);
3,808✔
48
}
49

50
/* Synchronous abort with error message and error info. */
51
void lj_trace_err_info(jit_State *J, TraceError e)
107✔
52
{
53
  setintV(J->L->top++, (int32_t)e);
107✔
54
  lj_err_throw(J->L, LUA_ERRRUN);
107✔
55
}
56

57
/* -- Trace management ---------------------------------------------------- */
58

59
/* The current trace is first assembled in J->cur. The variable length
60
** arrays point to shared, growable buffers (J->irbuf etc.). When trace
61
** recording ends successfully, the current trace and its data structures
62
** are copied to a new (compact) GCtrace object.
63
*/
64

65
/* Find a free trace number. */
66
static TraceNo trace_findfree(jit_State *J)
24,195✔
67
{
68
  MSize osz, lim;
24,195✔
69
  if (J->freetrace == 0)
24,195✔
70
    J->freetrace = 1;
535✔
71
  for (; J->freetrace < J->sizetrace; J->freetrace++)
24,768✔
72
    if (traceref(J, J->freetrace) == NULL)
24,523✔
73
      return J->freetrace++;
23,950✔
74
  /* Need to grow trace array. */
75
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
245✔
76
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
245✔
77
  osz = J->sizetrace;
245✔
78
  if (osz >= lim)
245✔
79
    return 0;  /* Too many traces. */
80
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
245✔
81
  for (; osz < J->sizetrace; osz++)
7,911✔
82
    setgcrefnull(J->trace[osz]);
7,666✔
83
  return J->freetrace;
245✔
84
}
85

86
#define TRACE_APPENDVEC(field, szfield, tp) \
87
  T->field = (tp *)p; \
88
  memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
89
  p += J->cur.szfield*sizeof(tp);
90

91
#ifdef LUAJIT_USE_PERFTOOLS
92
/*
93
** Create symbol table of JIT-compiled code. For use with Linux perf tools.
94
** Example usage:
95
**   perf record -f -e cycles luajit test.lua
96
**   perf report -s symbol
97
**   rm perf.data /tmp/perf-*.map
98
*/
99
#include <stdio.h>
100
#include <unistd.h>
101

102
static void perftools_addtrace(GCtrace *T)
103
{
104
  static FILE *fp;
105
  GCproto *pt = &gcref(T->startpt)->pt;
106
  const BCIns *startpc = mref(T->startpc, const BCIns);
107
  const char *name = proto_chunknamestr(pt);
108
  BCLine lineno;
109
  if (name[0] == '@' || name[0] == '=')
110
    name++;
111
  else
112
    name = "(string)";
113
  lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
114
             "trace PC out of range");
115
  lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
116
  if (!fp) {
117
    char fname[40];
118
    sprintf(fname, "/tmp/perf-%d.map", getpid());
119
    if (!(fp = fopen(fname, "w"))) return;
120
    setlinebuf(fp);
121
  }
122
  fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
123
          (long)T->mcode, T->szmcode, T->traceno, name, lineno);
124
}
125
#endif
126

127
/* Allocate space for copy of T. */
128
GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
21,787✔
129
{
130
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
21,787✔
131
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);
21,787✔
132
  size_t sz = sztr + szins +
21,787✔
133
              T->nsnap*sizeof(SnapShot) +
21,787✔
134
              T->nsnapmap*sizeof(SnapEntry);
21,787✔
135
  GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
21,787✔
136
  char *p = (char *)T2 + sztr;
21,787✔
137
  T2->gct = ~LJ_TTRACE;
21,787✔
138
  T2->marked = 0;
21,787✔
139
  T2->traceno = 0;
21,787✔
140
  T2->ir = (IRIns *)p - T->nk;
21,787✔
141
  T2->nins = T->nins;
21,787✔
142
  T2->nk = T->nk;
21,787✔
143
  T2->nsnap = T->nsnap;
21,787✔
144
  T2->nsnapmap = T->nsnapmap;
21,787✔
145
  memcpy(p, T->ir + T->nk, szins);
21,787✔
146
  L2J(L)->tracenum++;
21,787✔
147
  return T2;
21,787✔
148
}
149

150
/* Save current trace by copying and compacting it. */
151
static void trace_save(jit_State *J, GCtrace *T)
20,422✔
152
{
153
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
20,422✔
154
  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
20,422✔
155
  char *p = (char *)T + sztr;
20,422✔
156
  memcpy(T, &J->cur, sizeof(GCtrace));
20,422✔
157
  setgcrefr(T->nextgc, J2G(J)->gc.root);
20,422✔
158
  setgcrefp(J2G(J)->gc.root, T);
20,422✔
159
  newwhite(J2G(J), T);
20,422✔
160
  T->gct = ~LJ_TTRACE;
20,422✔
161
  T->ir = (IRIns *)p - J->cur.nk;  /* The IR has already been copied above. */
20,422✔
162
  p += szins;
20,422✔
163
  TRACE_APPENDVEC(snap, nsnap, SnapShot)
20,422✔
164
  TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
20,422✔
165
  J->cur.traceno = 0;
20,422✔
166
  J->curfinal = NULL;
20,422✔
167
  setgcrefp(J->trace[T->traceno], T);
20,422✔
168
  lj_gc_barriertrace(J2G(J), T->traceno);
20,422✔
169
  lj_gdbjit_addtrace(J, T);
20,422✔
170
#ifdef LUAJIT_USE_PERFTOOLS
171
  perftools_addtrace(T);
172
#endif
173

174
  /* Add a new trace to the profiler. */
175
#if LJ_HASMEMPROF
176
  lj_memprof_add_trace(T);
20,422✔
177
#endif
178

179
#if LJ_HASSYSPROF
180
  lj_sysprof_add_trace(T);
20,422✔
181
#endif
182
}
20,422✔
183

184
void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
21,784✔
185
{
186
  jit_State *J = G2J(g);
21,784✔
187
  if (T->traceno) {
21,784✔
188
    lj_gdbjit_deltrace(J, T);
3,272✔
189
    if (T->traceno < J->freetrace)
3,272✔
190
      J->freetrace = T->traceno;
3,178✔
191
    setgcrefnull(J->trace[T->traceno]);
3,272✔
192
  }
193
  lj_mem_free(g, T,
43,568✔
194
    ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
21,784✔
195
    T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
21,784✔
196
  J->tracenum--;
21,784✔
197
}
21,784✔
198

199
/* Re-enable compiling a prototype by unpatching any modified bytecode. */
200
void lj_trace_reenableproto(GCproto *pt)
51✔
201
{
202
  if ((pt->flags & PROTO_ILOOP)) {
51✔
203
    BCIns *bc = proto_bc(pt);
10✔
204
    BCPos i, sizebc = pt->sizebc;
10✔
205
    pt->flags &= ~PROTO_ILOOP;
10✔
206
    if (bc_op(bc[0]) == BC_IFUNCF)
10✔
UNCOV
207
      setbc_op(&bc[0], BC_FUNCF);
×
208
    for (i = 1; i < sizebc; i++) {
1,670✔
209
      BCOp op = bc_op(bc[i]);
1,660✔
210
      if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
1,660✔
211
        setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
10✔
212
    }
213
  }
214
}
51✔
215

216
/* Unpatch the bytecode modified by a root trace. */
217
static void trace_unpatch(jit_State *J, GCtrace *T)
218
{
219
  BCOp op = bc_op(T->startins);
220
  BCIns *pc = mref(T->startpc, BCIns);
221
  UNUSED(J);
222
  if (op == BC_JMP)
223
    return;  /* No need to unpatch branches in parent traces (yet). */
224
  switch (bc_op(*pc)) {
225
  case BC_JFORL:
226
    lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
227
    *pc = T->startins;
228
    pc += bc_j(T->startins);
229
    lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
230
    setbc_op(pc, BC_FORI);
231
    break;
232
  case BC_JITERL:
233
  case BC_JLOOP:
234
    lj_assertJ(op == BC_ITERL || op == BC_LOOP || bc_isret(op),
235
               "bad original bytecode %d", op);
236
    *pc = T->startins;
237
    break;
238
  case BC_JMP:
239
    lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op);
240
    pc += bc_j(*pc)+2;
241
    if (bc_op(*pc) == BC_JITERL) {
242
      lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace");
243
      *pc = T->startins;
244
    }
245
    break;
246
  case BC_JFUNCF:
247
    lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
248
    *pc = T->startins;
249
    break;
250
  default:  /* Already unpatched. */
251
    break;
252
  }
253
}
254

255
/* Flush a root trace. */
256
static void trace_flushroot(jit_State *J, GCtrace *T)
257
{
258
  GCproto *pt = &gcref(T->startpt)->pt;
259
  lj_assertJ(T->root == 0, "not a root trace");
260
  lj_assertJ(pt != NULL, "trace has no prototype");
261
  /* First unpatch any modified bytecode. */
262
  trace_unpatch(J, T);
263
  /* Unlink root trace from chain anchored in prototype. */
264
  if (pt->trace == T->traceno) {  /* Trace is first in chain. Easy. */
265
    pt->trace = T->nextroot;
266
  } else if (pt->trace) {  /* Otherwise search in chain of root traces. */
267
    GCtrace *T2 = traceref(J, pt->trace);
268
    if (T2) {
269
      for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
270
        if (T2->nextroot == T->traceno) {
271
          T2->nextroot = T->nextroot;  /* Unlink from chain. */
272
          break;
273
        }
274
    }
275
  }
276
}
277

278
/* Flush a trace. Only root traces are considered. */
279
void lj_trace_flush(jit_State *J, TraceNo traceno)
54✔
280
{
281
  if (traceno > 0 && traceno < J->sizetrace) {
54✔
282
    GCtrace *T = traceref(J, traceno);
54✔
283
    if (T && T->root == 0)
54✔
284
      trace_flushroot(J, T);
51✔
285
  }
286
}
54✔
287

288
/* Flush all traces associated with a prototype. */
289
void lj_trace_flushproto(global_State *g, GCproto *pt)
219✔
290
{
291
  while (pt->trace != 0)
237✔
292
    trace_flushroot(G2J(g), traceref(G2J(g), pt->trace));
18✔
293
}
219✔
294

295
/* Flush all traces. */
296
int lj_trace_flushall(lua_State *L)
453✔
297
{
298
  jit_State *J = L2J(L);
453✔
299
  ptrdiff_t i;
453✔
300
  if ((J2G(J)->hookmask & HOOK_GC))
453✔
301
    return 1;
302
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
50,933✔
303
    GCtrace *T = traceref(J, i);
50,480✔
304
    if (T) {
50,480✔
305
      if (T->root == 0)
17,149✔
306
        trace_flushroot(J, T);
2,129✔
307
      lj_gdbjit_deltrace(J, T);
17,149✔
308
      T->traceno = T->link = 0;  /* Blacklist the link for cont_stitch. */
17,149✔
309
      setgcrefnull(J->trace[i]);
17,149✔
310
    }
311
  }
312
  J->cur.traceno = 0;
453✔
313
  J->freetrace = 0;
453✔
314
  /* Clear penalty cache. */
315
  memset(J->penalty, 0, sizeof(J->penalty));
453✔
316
  /* Free the whole machine code and invalidate all exit stub groups. */
317
  lj_mcode_free(J);
453✔
318
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
453✔
319
  lj_vmevent_send(L, TRACE,
453✔
320
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
321
  );
322
  return 0;
323
}
324

325
/* Initialize JIT compiler state. */
326
void lj_trace_initstate(global_State *g)
362✔
327
{
328
  jit_State *J = G2J(g);
362✔
329
  TValue *tv;
362✔
330

331
  /* Initialize aligned SIMD constants. */
332
  tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
362✔
333
  tv[0].u64 = U64x(7fffffff,ffffffff);
362✔
334
  tv[1].u64 = U64x(7fffffff,ffffffff);
362✔
335
  tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
362✔
336
  tv[0].u64 = U64x(80000000,00000000);
362✔
337
  tv[1].u64 = U64x(80000000,00000000);
362✔
338

339
  /* Initialize 32/64 bit constants. */
340
#if LJ_TARGET_X86ORX64
341
  J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);
362✔
342
#if LJ_32
343
  J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);
344
#endif
345
  J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);
362✔
346
  J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
362✔
347
#endif
348
#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
349
  J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);
362✔
350
#endif
351
#if LJ_TARGET_PPC
352
  J->k32[LJ_K32_2P52_2P31] = 0x59800004;
353
  J->k32[LJ_K32_2P52] = 0x59800000;
354
#endif
355
#if LJ_TARGET_PPC || LJ_TARGET_MIPS
356
  J->k32[LJ_K32_2P31] = 0x4f000000;
357
#endif
358
#if LJ_TARGET_MIPS
359
  J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);
360
#if LJ_64
361
  J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);
362
  J->k32[LJ_K32_2P63] = 0x5f000000;
363
  J->k32[LJ_K32_M2P64] = 0xdf800000;
364
#endif
365
#endif
366
}
362✔
367

368
/* Free everything associated with the JIT compiler state. */
369
void lj_trace_freestate(global_State *g)
352✔
370
{
371
  jit_State *J = G2J(g);
352✔
372
#ifdef LUA_USE_ASSERT
373
  {  /* This assumes all traces have already been freed. */
374
    ptrdiff_t i;
375
    for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
376
      lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
377
                 "trace still allocated");
378
  }
379
#endif
380
  lj_mcode_free(J);
352✔
381
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
352✔
382
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
352✔
383
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
352✔
384
  lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
352✔
385
}
352✔
386

387
/* -- Penalties and blacklisting ------------------------------------------ */
388

389
/* Blacklist a bytecode instruction. */
390
static void blacklist_pc(GCproto *pt, BCIns *pc)
9✔
391
{
392
  setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
9✔
393
  pt->flags |= PROTO_ILOOP;
9✔
394
}
395

396
/* Penalize a bytecode instruction. */
397
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
2,114✔
398
{
399
  uint32_t i, val = PENALTY_MIN;
2,114✔
400
  for (i = 0; i < PENALTY_SLOTS; i++)
106,389✔
401
    if (mref(J->penalty[i].pc, const BCIns) == pc) {  /* Cache slot found? */
105,000✔
402
      /* First try to bump its hotcount several times. */
403
      val = ((uint32_t)J->penalty[i].val << 1) +
725✔
404
            LJ_PRNG_BITS(J, PENALTY_RNDBITS);
725✔
405
      if (val > PENALTY_MAX) {
725✔
406
        blacklist_pc(pt, pc);  /* Blacklist it, if that didn't help. */
9✔
407
        return;
9✔
408
      }
409
      goto setpenalty;
716✔
410
    }
411
  /* Assign a new penalty cache slot. */
412
  i = J->penaltyslot;
1,389✔
413
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
1,389✔
414
  setmref(J->penalty[i].pc, pc);
1,389✔
415
setpenalty:
2,105✔
416
  J->penalty[i].val = (uint16_t)val;
2,105✔
417
  J->penalty[i].reason = e;
2,105✔
418
  hotcount_set(J2GG(J), pc+1, val);
2,105✔
419
}
420

421
/* -- Trace compiler state machine ---------------------------------------- */
422

423
/* Start tracing. */
424
static void trace_start(jit_State *J)
24,279✔
425
{
426
  lua_State *L;
24,279✔
427
  TraceNo traceno;
24,279✔
428

429
  if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
24,279✔
430
    if (J->parent == 0 && J->exitno == 0) {
84✔
431
      /* Lazy bytecode patching to disable hotcount events. */
432
      lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
84✔
433
                 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
434
                 "bad hot bytecode %d", bc_op(*J->pc));
435
      setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
84✔
436
      J->pt->flags |= PROTO_ILOOP;
84✔
437
    }
438
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
84✔
439
    return;
84✔
440
  }
441

442
  /* Get a new trace number. */
443
  traceno = trace_findfree(J);
24,195✔
444
  if (LJ_UNLIKELY(traceno == 0)) {  /* No free trace? */
24,195✔
445
    lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
×
446
               "recorder called from GC hook");
447
    lj_trace_flushall(J->L);
×
448
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
×
449
    return;
×
450
  }
451
  setgcrefp(J->trace[traceno], &J->cur);
24,195✔
452

453
  /* Setup enough of the current trace to be able to send the vmevent. */
454
  memset(&J->cur, 0, sizeof(GCtrace));
24,195✔
455
  J->cur.traceno = traceno;
24,195✔
456
  J->cur.nins = J->cur.nk = REF_BASE;
24,195✔
457
  J->cur.ir = J->irbuf;
24,195✔
458
  J->cur.snap = J->snapbuf;
24,195✔
459
  J->cur.snapmap = J->snapmapbuf;
24,195✔
460
  J->mergesnap = 0;
24,195✔
461
  J->needsnap = 0;
24,195✔
462
  J->bcskip = 0;
24,195✔
463
  J->guardemit.irt = 0;
24,195✔
464
  J->postproc = LJ_POST_NONE;
24,195✔
465
  lj_resetsplit(J);
24,195✔
466
  J->retryrec = 0;
24,195✔
467
  J->ktrace = 0;
24,195✔
468
  setgcref(J->cur.startpt, obj2gco(J->pt));
24,195✔
469

470
  L = J->L;
24,195✔
471
  lj_vmevent_send(L, TRACE,
24,195✔
472
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
473
    setintV(L->top++, traceno);
474
    setfuncV(L, L->top++, J->fn);
475
    setintV(L->top++, proto_bcpos(J->pt, J->pc));
476
    if (J->parent) {
477
      setintV(L->top++, J->parent);
478
      setintV(L->top++, J->exitno);
479
    } else {
480
      BCOp op = bc_op(*J->pc);
481
      if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
482
        setintV(L->top++, J->exitno);  /* Parent of stitched trace. */
483
        setintV(L->top++, -1);
484
      }
485
    }
486
  );
24,195✔
487
  lj_record_setup(J);
24,195✔
488
}
489

490
/* Stop tracing. */
491
static void trace_stop(jit_State *J)
20,423✔
492
{
493
  BCIns *pc = mref(J->cur.startpc, BCIns);
20,423✔
494
  BCOp op = bc_op(J->cur.startins);
20,423✔
495
  GCproto *pt = &gcref(J->cur.startpt)->pt;
20,423✔
496
  TraceNo traceno = J->cur.traceno;
20,423✔
497
  GCtrace *T = J->curfinal;
20,423✔
498
  lua_State *L;
20,423✔
499

500
  switch (op) {
20,423✔
501
  case BC_FORL:
2,176✔
502
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
2,176✔
503
    /* fallthrough */
504
  case BC_LOOP:
3,764✔
505
  case BC_ITERL:
506
  case BC_FUNCF:
507
    /* Patch bytecode of starting instruction in root trace. */
508
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
3,764✔
509
    setbc_d(pc, traceno);
3,764✔
510
  addroot:
3,765✔
511
    /* Add to root trace chain in prototype. */
512
    J->cur.nextroot = pt->trace;
3,765✔
513
    pt->trace = (TraceNo1)traceno;
3,765✔
514
    break;
3,765✔
515
  case BC_RET:
1✔
516
  case BC_RET0:
517
  case BC_RET1:
518
    *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
1✔
519
    goto addroot;
1✔
520
  case BC_JMP:
16,576✔
521
    /* Patch exit branch in parent to side trace entry. */
522
    lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
16,576✔
523
    lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
16,576✔
524
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
525
    {
526
      SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
16,576✔
527
      snap->count = SNAPCOUNT_DONE;
16,576✔
528
      if (J->cur.topslot > snap->topslot) snap->topslot = J->cur.topslot;
16,576✔
529
    }
530
    /* Add to side trace chain in root trace. */
531
    {
532
      GCtrace *root = traceref(J, J->cur.root);
16,576✔
533
      root->nchild++;
16,576✔
534
      J->cur.nextside = root->nextside;
16,576✔
535
      root->nextside = (TraceNo1)traceno;
16,576✔
536
    }
537
    break;
16,576✔
538
  case BC_CALLM:
82✔
539
  case BC_CALL:
540
  case BC_ITERC:
541
    /* Trace stitching: patch link of previous trace. */
542
    traceref(J, J->exitno)->link = traceno;
82✔
543
    break;
82✔
544
  default:
545
    lj_assertJ(0, "bad stop bytecode %d", op);
546
    break;
547
  }
548

549
  /* Commit new mcode only after all patching is done. */
550
  lj_mcode_commit(J, J->cur.mcode);
20,423✔
551
  J->postproc = LJ_POST_NONE;
20,422✔
552
  trace_save(J, T);
20,422✔
553

554
  L = J->L;
20,422✔
555
  lj_vmevent_send(L, TRACE,
20,422✔
556
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
557
    setintV(L->top++, traceno);
558
    setfuncV(L, L->top++, J->fn);
559
  );
20,422✔
560
}
20,422✔
561

562
/* Start a new root trace for down-recursion. */
563
static int trace_downrec(jit_State *J)
1✔
564
{
565
  /* Restart recording at the return instruction. */
566
  lj_assertJ(J->pt != NULL, "no active prototype");
1✔
567
  lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
1✔
568
  if (bc_op(*J->pc) == BC_RETM) {
1✔
569
    J->ntraceabort++;
×
570
    return 0;  /* NYI: down-recursion with RETM. */
×
571
  }
572
  J->parent = 0;
1✔
573
  J->exitno = 0;
1✔
574
  J->state = LJ_TRACE_RECORD;
1✔
575
  trace_start(J);
1✔
576
  return 1;
1✔
577
}
578

579
/* Abort tracing. */
580
static int trace_abort(jit_State *J)
4,732✔
581
{
582
  lua_State *L = J->L;
4,732✔
583
  TraceError e = LJ_TRERR_RECERR;
4,732✔
584
  TraceNo traceno;
4,732✔
585

586
  J->postproc = LJ_POST_NONE;
4,732✔
587
  lj_mcode_abort(J);
4,732✔
588
  if (J->curfinal) {
4,732✔
589
    lj_trace_free(J2G(J), J->curfinal);
1,170✔
590
    J->curfinal = NULL;
1,170✔
591
  }
592
  if (tvisnumber(L->top-1))
4,732✔
593
    e = (TraceError)numberVint(L->top-1);
4,731✔
594
  if (e == LJ_TRERR_MCODELM) {
4,731✔
595
    L->top--;  /* Remove error object */
960✔
596
    J->state = LJ_TRACE_ASM;
960✔
597
    return 1;  /* Retry ASM with new MCode area. */
960✔
598
  }
599
  /* Penalize or blacklist starting bytecode instruction. */
600
  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
3,772✔
601
    if (J->exitno == 0) {
2,128✔
602
      BCIns *startpc = mref(J->cur.startpc, BCIns);
2,114✔
603
      if (e == LJ_TRERR_RETRY)
2,114✔
604
        hotcount_set(J2GG(J), startpc+1, 1);  /* Immediate retry. */
×
605
      else
606
        penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
2,114✔
607
    } else {
608
      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
14✔
609
    }
610
  }
611

612
  /* Is there anything to abort? */
613
  traceno = J->cur.traceno;
3,772✔
614
  if (traceno) {
3,772✔
615
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
3,771✔
616
    J->cur.link = 0;
3,771✔
617
    J->cur.linktype = LJ_TRLINK_NONE;
3,771✔
618
    lj_vmevent_send(L, TRACE,
3,774✔
619
      TValue *frame;
620
      const BCIns *pc;
621
      GCfunc *fn;
622
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
623
      setintV(L->top++, traceno);
624
      /* Find original Lua function call to generate a better error message. */
625
      frame = J->L->base-1;
626
      pc = J->pc;
627
      while (!isluafunc(frame_func(frame))) {
628
        pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
629
        frame = frame_prev(frame);
630
      }
631
      fn = frame_func(frame);
632
      setfuncV(L, L->top++, fn);
633
      setintV(L->top++, proto_bcpos(funcproto(fn), pc));
634
      copyTV(L, L->top++, restorestack(L, errobj));
635
      copyTV(L, L->top++, &J->errinfo);
636
    );
3,771✔
637
    /* Drop aborted trace after the vmevent (which may still access it). */
638
    setgcrefnull(J->trace[traceno]);
3,771✔
639
    if (traceno < J->freetrace)
3,771✔
640
      J->freetrace = traceno;
3,737✔
641
    J->cur.traceno = 0;
3,771✔
642
  }
643
  L->top--;  /* Remove error object */
3,772✔
644
  if (e == LJ_TRERR_DOWNREC)
3,772✔
645
    return trace_downrec(J);
2✔
646
  else if (e == LJ_TRERR_MCODEAL)
3,771✔
647
    lj_trace_flushall(L);
127✔
648
  J->ntraceabort++;
3,771✔
649
  return 0;
3,771✔
650
}
651

652
/* Perform pending re-patch of a bytecode instruction. */
653
static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
9,217,868✔
654
{
655
  if (LJ_UNLIKELY(J->patchpc)) {
9,217,868✔
656
    if (force || J->bcskip == 0) {
802,561✔
657
      *J->patchpc = J->patchins;
802,558✔
658
      J->patchpc = NULL;
802,558✔
659
    } else {
660
      J->bcskip = 0;
3✔
661
    }
662
  }
663
}
664

665
/* State machine for the trace compiler. Protected callback. */
666
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
9,221,500✔
667
{
668
  jit_State *J = (jit_State *)ud;
9,221,500✔
669
  UNUSED(dummy);
9,263,739✔
670
  do {
9,263,739✔
671
  retry:
41,278✔
672
    switch (J->state) {
9,263,739✔
673
    case LJ_TRACE_START:
24,278✔
674
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
24,278✔
675
      trace_start(J);
24,278✔
676
      lj_dispatch_update(J2G(J));
24,277✔
677
      break;
24,277✔
678

679
    case LJ_TRACE_RECORD:
680
      trace_pendpatch(J, 0);
9,192,491✔
681
      setvmstate(J2G(J), RECORD);
9,192,491✔
682
      lj_vmevent_send_(L, RECORD,
9,192,491✔
683
        /* Save/restore state for trace recorder. */
684
        TValue savetv = J2G(J)->tmptv;
685
        TValue savetv2 = J2G(J)->tmptv2;
686
        TraceNo parent = J->parent;
687
        ExitNo exitno = J->exitno;
688
        setintV(L->top++, J->cur.traceno);
689
        setfuncV(L, L->top++, J->fn);
690
        setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
691
        setintV(L->top++, J->framedepth);
692
      ,
693
        J2G(J)->tmptv = savetv;
694
        J2G(J)->tmptv2 = savetv2;
695
        J->parent = parent;
696
        J->exitno = exitno;
697
      );
9,192,491✔
698
      lj_record_ins(J);
9,192,491✔
699
      break;
9,192,491✔
700

701
    case LJ_TRACE_END:
702
      trace_pendpatch(J, 1);
20,645✔
703
      J->loopref = 0;
20,645✔
704
      if ((J->flags & JIT_F_OPT_LOOP) &&
20,645✔
705
          J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
19,195✔
706
        setvmstate(J2G(J), OPT);
1,803✔
707
        lj_opt_dce(J);
1,803✔
708
        if (lj_opt_loop(J)) {  /* Loop optimization failed? */
1,803✔
709
          J->cur.link = 0;
12✔
710
          J->cur.linktype = LJ_TRLINK_NONE;
12✔
711
          J->loopref = J->cur.nins;
12✔
712
          J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
12✔
713
          break;
12✔
714
        }
715
        J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
1,791✔
716
      }
717
      lj_opt_split(J);
20,633✔
718
      lj_opt_sink(J);
20,633✔
719
      if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
20,633✔
720
      J->state = LJ_TRACE_ASM;
20,633✔
721
      break;
20,633✔
722

723
    case LJ_TRACE_ASM:
21,593✔
724
      setvmstate(J2G(J), ASM);
21,593✔
725
      lj_asm_trace(J, &J->cur);
21,593✔
726
      trace_stop(J);
20,423✔
727
      setvmstate(J2G(J), INTERP);
20,422✔
728
      J->state = LJ_TRACE_IDLE;
20,422✔
729
      lj_dispatch_update(J2G(J));
20,422✔
730
      return NULL;
20,422✔
731

732
    default:  /* Trace aborted asynchronously. */
826✔
733
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
826✔
734
      /* fallthrough */
735
    case LJ_TRACE_ERR:
4,732✔
736
      trace_pendpatch(J, 1);
4,732✔
737
      if (trace_abort(J))
4,732✔
738
        goto retry;
961✔
739
      setvmstate(J2G(J), INTERP);
3,771✔
740
      J->state = LJ_TRACE_IDLE;
3,771✔
741
      lj_dispatch_update(J2G(J));
3,771✔
742
      return NULL;
3,771✔
743
    }
744
  } while (J->state > LJ_TRACE_RECORD);
9,234,678✔
745
  return NULL;
746
}
747

748
/* -- Event handling ------------------------------------------------------ */
749

750
/* A bytecode instruction is about to be executed. Record it. */
751
void lj_trace_ins(jit_State *J, const BCIns *pc)
9,217,594✔
752
{
753
  /* Note: J->L must already be set. pc is the true bytecode PC here. */
754
  J->pc = pc;
9,217,594✔
755
  J->fn = curr_func(J->L);
9,217,594✔
756
  J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
9,217,594✔
757
  while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
9,221,500✔
758
    J->state = LJ_TRACE_ERR;
3,906✔
759
}
9,217,593✔
760

761
/* A hotcount triggered. Start recording a root trace. */
762
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
20,343✔
763
{
764
  /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
765
  ERRNO_SAVE
20,343✔
766
  /* Reset hotcount. */
767
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
20,343✔
768
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
769
  if (J->state == LJ_TRACE_IDLE &&
20,343✔
770
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
20,332✔
771
    J->parent = 0;  /* Root trace. */
5,962✔
772
    J->exitno = 0;
5,962✔
773
    J->state = LJ_TRACE_START;
5,962✔
774
    lj_trace_ins(J, pc-1);
5,962✔
775
  }
776
  ERRNO_RESTORE
20,343✔
777
}
20,343✔
778

779
/* Check for a hot side exit. If yes, start recording a side trace. */
780
static void trace_hotside(jit_State *J, const BCIns *pc)
260,908✔
781
{
782
  SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
260,908✔
783
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
260,908✔
784
      isluafunc(curr_func(J->L)) &&
260,908✔
785
      snap->count != SNAPCOUNT_DONE &&
260,884✔
786
      ++snap->count >= J->param[JIT_P_hotexit]) {
260,858✔
787
    lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
18,220✔
788
    /* J->parent is non-zero for a side trace. */
789
    J->state = LJ_TRACE_START;
18,220✔
790
    lj_trace_ins(J, pc);
18,220✔
791
  }
792
}
260,908✔
793

794
/* Stitch a new trace to the previous trace. */
795
void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
143✔
796
{
797
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
798
  if (J->state == LJ_TRACE_IDLE &&
143✔
799
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
96✔
800
    J->parent = 0;  /* Have to treat it like a root trace. */
96✔
801
    /* J->exitno is set to the invoking trace. */
802
    J->state = LJ_TRACE_START;
96✔
803
    lj_trace_ins(J, pc);
96✔
804
  }
805
}
143✔
806

807

808
/* Tiny struct to pass data to protected call. */
809
typedef struct ExitDataCP {
810
  jit_State *J;
811
  void *exptr;                /* Pointer to exit state. */
812
  const BCIns *pc;        /* Restart interpreter at this PC. */
813
} ExitDataCP;
814

815
/* Need to protect lj_snap_restore because it may throw. */
816
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
261,528✔
817
{
818
  ExitDataCP *exd = (ExitDataCP *)ud;
261,528✔
819
  /* Always catch error here and don't call error function. */
820
  cframe_errfunc(L->cframe) = 0;
261,528✔
821
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
261,528✔
822
  exd->pc = lj_snap_restore(exd->J, exd->exptr);
261,528✔
823
  UNUSED(dummy);
261,523✔
824
  return NULL;
261,523✔
825
}
826

827
#ifndef LUAJIT_DISABLE_VMEVENT
/* Push all registers from exit state (for the TEXIT VM event handler). */
static void trace_exit_regs(lua_State *L, ExitState *ex)
{
  int32_t i;
  /* Register counts first, then GPRs, then FPRs. */
  setintV(L->top++, RID_NUM_GPR);
  setintV(L->top++, RID_NUM_FPR);
  for (i = 0; i < RID_NUM_GPR; i++) {
    if (sizeof(ex->gpr[i]) == sizeof(int32_t))
      setintV(L->top++, (int32_t)ex->gpr[i]);
    else
      setnumV(L->top++, (lua_Number)ex->gpr[i]);
  }
#if !LJ_SOFTFP
  for (i = 0; i < RID_NUM_FPR; i++) {
    setnumV(L->top, ex->fpr[i]);
    if (LJ_UNLIKELY(tvisnan(L->top)))
      setnanV(L->top);  /* Canonicalize NaN before exposing it as a TValue. */
    L->top++;
  }
#endif
}
#endif
850

851
#if defined(EXITSTATE_PCREG) || (LJ_UNWIND_JIT && !EXITTRACE_VMSTATE)
852
/* Determine trace number from pc of exit instruction. */
853
static TraceNo trace_exit_find(jit_State *J, MCode *pc)
854
{
855
  TraceNo traceno;
856
  for (traceno = 1; traceno < J->sizetrace; traceno++) {
857
    GCtrace *T = traceref(J, traceno);
858
    if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
859
      return traceno;
860
  }
861
  lj_assertJ(0, "bad exit pc");
862
  return 0;
863
}
864
#endif
865

866
/* A trace exited. Restore interpreter state.
** Returns the number of multiple results (MULTRES), 0 if none,
** or a negated error code if the snapshot restore or trace unwind failed.
*/
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
  ERRNO_SAVE  /* Preserve errno across the whole exit handling. */
  lua_State *L = J->L;
  ExitState *ex = (ExitState *)exptr;
  ExitDataCP exd;
  int errcode, exitcode = J->exitcode;
  TValue exiterr;
  const BCIns *pc;
  void *cf;
  GCtrace *T;

  setnilV(&exiterr);
  if (exitcode) {  /* Trace unwound with error code. */
    J->exitcode = 0;
    copyTV(L, &exiterr, L->top-1);  /* Save error object before restore. */
  }

#ifdef EXITSTATE_PCREG
  /* Exit pc is only available in a register: search for the owning trace. */
  J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
#endif
  T = traceref(J, J->parent); UNUSED(T);
#ifdef EXITSTATE_CHECKEXIT
  if (J->exitno == T->nsnap) {  /* Treat stack check like a parent exit. */
    lj_assertJ(T->root != 0, "stack check in root trace");
    J->exitno = T->ir[REF_BASE].op2;
    J->parent = T->ir[REF_BASE].op1;
    T = traceref(J, J->parent);
  }
#endif
  lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
  exd.J = J;
  exd.exptr = exptr;
  /* Restore from snapshot in a protected call: lj_snap_restore may throw. */
  errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
  if (errcode)
    return -errcode;  /* Return negated error code. */

  if (exitcode) copyTV(L, L->top++, &exiterr);  /* Anchor the error object. */

  /* Fire the TEXIT VM event, unless profiling hooks are active. */
  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
    lj_vmevent_send(L, TEXIT,
      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
      trace_exit_regs(L, ex);
    );

  pc = exd.pc;
  cf = cframe_raw(L->cframe);
  setcframe_pc(cf, pc);  /* Interpreter restarts at this PC. */
  if (exitcode) {
    return -exitcode;  /* Rethrow error from the propagated exit code. */
  } else if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
    /* Just exit to interpreter. */
  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
    if (!(G(L)->hookmask & HOOK_GC))
      lj_gc_step(L);  /* Exited because of GC: drive GC forward. */
  } else if ((J->flags & JIT_F_ON)) {
    trace_hotside(J, pc);  /* May start recording a side trace. */
  }
  if (bc_op(*pc) == BC_JLOOP) {
    BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
    if (bc_isret(bc_op(*retpc))) {
      if (J->state == LJ_TRACE_RECORD) {
	/* Temporarily unpatch JLOOP to a return while recording;
	** remember the original so it can be re-applied afterwards.
	*/
	J->patchins = *pc;
	J->patchpc = (BCIns *)pc;
	*J->patchpc = *retpc;
	J->bcskip = 1;
      } else {
	pc = retpc;
	setcframe_pc(cf, pc);
      }
    }
  }
  /* Return MULTRES or 0. */
  ERRNO_RESTORE
  switch (bc_op(*pc)) {
  case BC_CALLM: case BC_CALLMT:
    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
  case BC_RETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
  case BC_TSETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
  default:
    if (bc_op(*pc) >= BC_FUNCF)  /* Exited on a function header. */
      return (int)((BCReg)(L->top - L->base) + 1);
    return 0;
  }
}
956

957
#if LJ_UNWIND_JIT
958
/* Given an mcode address determine trace exit address for unwinding. */
959
uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep)
4✔
960
{
961
#if EXITTRACE_VMSTATE
962
  TraceNo traceno = J2G(J)->vmstate;
4✔
963
#else
964
  TraceNo traceno = trace_exit_find(J, (MCode *)addr);
965
#endif
966
  GCtrace *T = traceref(J, traceno);
4✔
967
  if (T
4✔
968
#if EXITTRACE_VMSTATE
969
      && addr >= (uintptr_t)T->mcode && addr < (uintptr_t)T->mcode + T->szmcode
4✔
970
#endif
971
     ) {
972
    SnapShot *snap = T->snap;
4✔
973
    SnapNo lo = 0, exitno = T->nsnap;
4✔
974
    uintptr_t ofs = (uintptr_t)((MCode *)addr - T->mcode);  /* MCode units! */
4✔
975
    /* Rightmost binary search for mcode offset to determine exit number. */
976
    do {
16✔
977
      SnapNo mid = (lo+exitno) >> 1;
16✔
978
      if (ofs < snap[mid].mcofs) exitno = mid; else lo = mid + 1;
16✔
979
    } while (lo < exitno);
16✔
980
    exitno--;
4✔
981
    *ep = exitno;
4✔
982
#ifdef EXITSTUBS_PER_GROUP
983
    return (uintptr_t)exitstub_addr(J, exitno);
4✔
984
#else
985
    return (uintptr_t)exitstub_trace_addr(T, exitno);
986
#endif
987
  }
988
  lj_assertJ(0, "bad exit pc");
989
  return 0;
990
}
991
#endif
992

993
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc