tarantool / luajit / build 15994754186 (push, via github)

01 Jul 2025 08:44AM UTC coverage: 93.064% (+0.04%) from 93.023%

Buristan
ARM64: Fix LDP/STP fusing for unaligned accesses.

Thanks to Peter Cawley.

(cherry picked from commit 0fa2f1cbc)

The arm64 emission of load/store operations works incorrectly when at
least one offset of the loads/stores to be fused into an ldp/stp is
misaligned. The misalignment is ignored and the instructions are fused
anyway, which leads to loading/storing from/to at least one incorrect
address.

For example, the following instructions:
| stur  w0, [x1, #17]
| stur  w0, [x1, #21]

may be fused into the following:
| stp   w0, w0, [x1, #16]

This patch prevents fusion in this case by testing the alignment with
the help of a bitwise ROR by the alignment value. For a misaligned
offset, the rotated value overflows the 7-bit mask in the check.

The negative immediate (7 bits wide, including the sign bit) is
handled by the corresponding addition of `64 << sc` (which is harmless
for positive values).
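
A minimal sketch of the check described above (illustrative only, not
the exact emitter code; `ofs` is the load/store offset and `sc` the
scale, so a valid ldp/stp immediate must lie in [-64 << sc, 63 << sc]
and be (1 << sc)-aligned):

| #include <stdint.h>
|
| static uint32_t ror32(uint32_t x, uint32_t n)
| {
|   return n ? ((x >> n) | (x << (32 - n))) : x;  /* Bitwise ROR. */
| }
|
| /* Biasing by 64 << sc maps [-64 << sc, 63 << sc] onto [0, 127 << sc];
| ** the ROR by sc then moves any misaligned low bits to the top of the
| ** word, so the result overflows the 7-bit (<= 127) limit and fusion
| ** is rejected.
| */
| static int ldp_stp_offset_ok(int32_t ofs, unsigned sc)
| {
|   return ror32((uint32_t)ofs + (64u << sc), sc) <= 127u;
| }

For `sc = 2` (32-bit stores), `ofs = 17` biases to 273 and the rotation
carries the set low bit into the top of the word, failing the check,
while the aligned `ofs = 16` rotates to 68 and passes.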

Sergey Kaplun:
* added the description and the test for the problem

Part of tarantool/tarantool#11278

Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Signed-off-by: Sergey Kaplun <skaplun@tarantool.org>
(cherry picked from commit 4fd46fab4)

5712 of 6046 branches covered (94.48%)

Branch coverage included in aggregate %.

21792 of 23508 relevant lines covered (92.7%)

3836149.2 hits per line
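
Note: the aggregate percentage above appears to combine line and branch
hits, which reproduces the reported figure:

(21792 + 5712) / (23508 + 6046) = 27504 / 29554 ≈ 93.064%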

Source File: /src/lj_trace.c (97.28% covered)

/*
** Trace management.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_trace_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_err.h"
#include "lj_debug.h"
#include "lj_str.h"
#include "lj_frame.h"
#include "lj_state.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_gdbjit.h"
#include "lj_record.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"
#include "lj_target.h"
#if LJ_HASMEMPROF
#include "lj_memprof.h"
#endif
#if LJ_HASSYSPROF
#include "lj_sysprof.h"
#endif

/* -- Error handling ------------------------------------------------------ */

/* Synchronous abort with error message. */
void lj_trace_err(jit_State *J, TraceError e)
{
  setnilV(&J->errinfo);  /* No error info. */
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* Synchronous abort with error message and error info. */
void lj_trace_err_info(jit_State *J, TraceError e)
{
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* -- Trace management ---------------------------------------------------- */

/* The current trace is first assembled in J->cur. The variable length
** arrays point to shared, growable buffers (J->irbuf etc.). When trace
** recording ends successfully, the current trace and its data structures
** are copied to a new (compact) GCtrace object.
*/

/* Find a free trace number. */
static TraceNo trace_findfree(jit_State *J)
{
  MSize osz, lim;
  if (J->freetrace == 0)
    J->freetrace = 1;
  for (; J->freetrace < J->sizetrace; J->freetrace++)
    if (traceref(J, J->freetrace) == NULL)
      return J->freetrace++;
  /* Need to grow trace array. */
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
  osz = J->sizetrace;
  if (osz >= lim)
    return 0;  /* Too many traces. */
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
  for (; osz < J->sizetrace; osz++)
    setgcrefnull(J->trace[osz]);
  return J->freetrace;
}

#define TRACE_APPENDVEC(field, szfield, tp) \
  T->field = (tp *)p; \
  memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
  p += J->cur.szfield*sizeof(tp);

#ifdef LUAJIT_USE_PERFTOOLS
/*
** Create symbol table of JIT-compiled code. For use with Linux perf tools.
** Example usage:
**   perf record -f -e cycles luajit test.lua
**   perf report -s symbol
**   rm perf.data /tmp/perf-*.map
*/
#include <stdio.h>
#include <unistd.h>

static void perftools_addtrace(GCtrace *T)
{
  static FILE *fp;
  GCproto *pt = &gcref(T->startpt)->pt;
  const BCIns *startpc = mref(T->startpc, const BCIns);
  const char *name = proto_chunknamestr(pt);
  BCLine lineno;
  if (name[0] == '@' || name[0] == '=')
    name++;
  else
    name = "(string)";
  lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
             "trace PC out of range");
  lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
  if (!fp) {
    char fname[40];
    sprintf(fname, "/tmp/perf-%d.map", getpid());
    if (!(fp = fopen(fname, "w"))) return;
    setlinebuf(fp);
  }
  fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
          (long)T->mcode, T->szmcode, T->traceno, name, lineno);
}
#endif

/* Allocate space for copy of T. */
GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);
  size_t sz = sztr + szins +
              T->nsnap*sizeof(SnapShot) +
              T->nsnapmap*sizeof(SnapEntry);
  GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
  char *p = (char *)T2 + sztr;
  T2->gct = ~LJ_TTRACE;
  T2->marked = 0;
  T2->traceno = 0;
  T2->ir = (IRIns *)p - T->nk;
  T2->nins = T->nins;
  T2->nk = T->nk;
  T2->nsnap = T->nsnap;
  T2->nsnapmap = T->nsnapmap;
  memcpy(p, T->ir + T->nk, szins);
  L2J(L)->tracenum++;
  return T2;
}

/* Save current trace by copying and compacting it. */
static void trace_save(jit_State *J, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
  char *p = (char *)T + sztr;
  memcpy(T, &J->cur, sizeof(GCtrace));
  setgcrefr(T->nextgc, J2G(J)->gc.root);
  setgcrefp(J2G(J)->gc.root, T);
  newwhite(J2G(J), T);
  T->gct = ~LJ_TTRACE;
  T->ir = (IRIns *)p - J->cur.nk;  /* The IR has already been copied above. */
  p += szins;
  TRACE_APPENDVEC(snap, nsnap, SnapShot)
  TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
  J->cur.traceno = 0;
  J->curfinal = NULL;
  setgcrefp(J->trace[T->traceno], T);
  lj_gc_barriertrace(J2G(J), T->traceno);
  lj_gdbjit_addtrace(J, T);
#ifdef LUAJIT_USE_PERFTOOLS
  perftools_addtrace(T);
#endif

  /* Add a new trace to the profiler. */
#if LJ_HASMEMPROF
  lj_memprof_add_trace(T);
#endif

#if LJ_HASSYSPROF
  lj_sysprof_add_trace(T);
#endif
}

void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
{
  jit_State *J = G2J(g);
  if (T->traceno) {
    lj_gdbjit_deltrace(J, T);
    if (T->traceno < J->freetrace)
      J->freetrace = T->traceno;
    setgcrefnull(J->trace[T->traceno]);
  }
  lj_mem_free(g, T,
    ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
    T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
  J->tracenum--;
}

/* Re-enable compiling a prototype by unpatching any modified bytecode. */
void lj_trace_reenableproto(GCproto *pt)
{
  if ((pt->flags & PROTO_ILOOP)) {
    BCIns *bc = proto_bc(pt);
    BCPos i, sizebc = pt->sizebc;
    pt->flags &= ~PROTO_ILOOP;
    if (bc_op(bc[0]) == BC_IFUNCF)
      setbc_op(&bc[0], BC_FUNCF);
    for (i = 1; i < sizebc; i++) {
      BCOp op = bc_op(bc[i]);
      if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
        setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
    }
  }
}

/* Unpatch the bytecode modified by a root trace. */
static void trace_unpatch(jit_State *J, GCtrace *T)
{
  BCOp op = bc_op(T->startins);
  BCIns *pc = mref(T->startpc, BCIns);
  UNUSED(J);
  if (op == BC_JMP)
    return;  /* No need to unpatch branches in parent traces (yet). */
  switch (bc_op(*pc)) {
  case BC_JFORL:
    lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
    *pc = T->startins;
    pc += bc_j(T->startins);
    lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
    setbc_op(pc, BC_FORI);
    break;
  case BC_JITERL:
  case BC_JLOOP:
    lj_assertJ(op == BC_ITERL || op == BC_LOOP || bc_isret(op),
               "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  case BC_JFUNCF:
    lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  default:  /* Already unpatched. */
    break;
  }
}

/* Flush a root trace. */
static void trace_flushroot(jit_State *J, GCtrace *T)
{
  GCproto *pt = &gcref(T->startpt)->pt;
  lj_assertJ(T->root == 0, "not a root trace");
  lj_assertJ(pt != NULL, "trace has no prototype");
  /* Unlink root trace from chain anchored in prototype. */
  if (pt->trace == T->traceno) {  /* Trace is first in chain. Easy. */
    pt->trace = T->nextroot;
unpatch:
    /* Unpatch modified bytecode only if the trace has not been flushed. */
    trace_unpatch(J, T);
  } else if (pt->trace) {  /* Otherwise search in chain of root traces. */
    GCtrace *T2 = traceref(J, pt->trace);
    if (T2) {
      for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
        if (T2->nextroot == T->traceno) {
          T2->nextroot = T->nextroot;  /* Unlink from chain. */
          goto unpatch;
        }
    }
  }
}

/* Flush a trace. Only root traces are considered. */
void lj_trace_flush(jit_State *J, TraceNo traceno)
{
  if (traceno > 0 && traceno < J->sizetrace) {
    GCtrace *T = traceref(J, traceno);
    if (T && T->root == 0)
      trace_flushroot(J, T);
  }
}

/* Flush all traces associated with a prototype. */
void lj_trace_flushproto(global_State *g, GCproto *pt)
{
  while (pt->trace != 0)
    trace_flushroot(G2J(g), traceref(G2J(g), pt->trace));
}

/* Flush all traces. */
int lj_trace_flushall(lua_State *L)
{
  jit_State *J = L2J(L);
  ptrdiff_t i;
  if ((J2G(J)->hookmask & HOOK_GC))
    return 1;
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
    GCtrace *T = traceref(J, i);
    if (T) {
      if (T->root == 0)
        trace_flushroot(J, T);
      lj_gdbjit_deltrace(J, T);
      T->traceno = T->link = 0;  /* Blacklist the link for cont_stitch. */
      setgcrefnull(J->trace[i]);
    }
  }
  J->cur.traceno = 0;
  J->freetrace = 0;
  /* Clear penalty cache. */
  memset(J->penalty, 0, sizeof(J->penalty));
  /* Free the whole machine code and invalidate all exit stub groups. */
  lj_mcode_free(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
  );
  return 0;
}

/* Initialize JIT compiler state. */
void lj_trace_initstate(global_State *g)
{
  jit_State *J = G2J(g);
  TValue *tv;

  /* Initialize aligned SIMD constants. */
  tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
  tv[0].u64 = U64x(7fffffff,ffffffff);
  tv[1].u64 = U64x(7fffffff,ffffffff);
  tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
  tv[0].u64 = U64x(80000000,00000000);
  tv[1].u64 = U64x(80000000,00000000);

  /* Initialize 32/64 bit constants. */
#if LJ_TARGET_X86ORX64
  J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);
#if LJ_32
  J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);
#endif
  J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);
  J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
#endif
#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
  J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);
#endif
#if LJ_TARGET_PPC
  J->k32[LJ_K32_2P52_2P31] = 0x59800004;
  J->k32[LJ_K32_2P52] = 0x59800000;
#endif
#if LJ_TARGET_PPC || LJ_TARGET_MIPS
  J->k32[LJ_K32_2P31] = 0x4f000000;
#endif
#if LJ_TARGET_MIPS
  J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);
#if LJ_64
  J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);
  J->k32[LJ_K32_2P63] = 0x5f000000;
  J->k32[LJ_K32_M2P64] = 0xdf800000;
#endif
#endif
}

/* Free everything associated with the JIT compiler state. */
void lj_trace_freestate(global_State *g)
{
  jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
  {  /* This assumes all traces have already been freed. */
    ptrdiff_t i;
    for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
      lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
                 "trace still allocated");
  }
#endif
  lj_mcode_free(J);
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
  lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
}

/* -- Penalties and blacklisting ------------------------------------------ */

/* Blacklist a bytecode instruction. */
static void blacklist_pc(GCproto *pt, BCIns *pc)
{
  setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
  pt->flags |= PROTO_ILOOP;
}

/* Penalize a bytecode instruction. */
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
{
  uint32_t i, val = PENALTY_MIN;
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {  /* Cache slot found? */
      /* First try to bump its hotcount several times. */
      val = ((uint32_t)J->penalty[i].val << 1) +
            LJ_PRNG_BITS(J, PENALTY_RNDBITS);
      if (val > PENALTY_MAX) {
        blacklist_pc(pt, pc);  /* Blacklist it, if that didn't help. */
        return;
      }
      goto setpenalty;
    }
  /* Assign a new penalty cache slot. */
  i = J->penaltyslot;
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
  setmref(J->penalty[i].pc, pc);
setpenalty:
  J->penalty[i].val = (uint16_t)val;
  J->penalty[i].reason = e;
  hotcount_set(J2GG(J), pc+1, val);
}

/* -- Trace compiler state machine ---------------------------------------- */

/* Start tracing. */
static void trace_start(jit_State *J)
{
  lua_State *L;
  TraceNo traceno;

  if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
    if (J->parent == 0 && J->exitno == 0) {
      /* Lazy bytecode patching to disable hotcount events. */
      lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
                 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
                 "bad hot bytecode %d", bc_op(*J->pc));
      setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
      J->pt->flags |= PROTO_ILOOP;
    }
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Get a new trace number. */
  traceno = trace_findfree(J);
  if (LJ_UNLIKELY(traceno == 0)) {  /* No free trace? */
    lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
               "recorder called from GC hook");
    lj_trace_flushall(J->L);
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }
  setgcrefp(J->trace[traceno], &J->cur);

  /* Setup enough of the current trace to be able to send the vmevent. */
  memset(&J->cur, 0, sizeof(GCtrace));
  J->cur.traceno = traceno;
  J->cur.nins = J->cur.nk = REF_BASE;
  J->cur.ir = J->irbuf;
  J->cur.snap = J->snapbuf;
  J->cur.snapmap = J->snapmapbuf;
  J->mergesnap = 0;
  J->needsnap = 0;
  J->bcskip = 0;
  J->guardemit.irt = 0;
  J->postproc = LJ_POST_NONE;
  lj_resetsplit(J);
  J->retryrec = 0;
  J->ktrace = 0;
  setgcref(J->cur.startpt, obj2gco(J->pt));

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
    setintV(L->top++, proto_bcpos(J->pt, J->pc));
    if (J->parent) {
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
    } else {
      BCOp op = bc_op(*J->pc);
      if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
        setintV(L->top++, J->exitno);  /* Parent of stitched trace. */
        setintV(L->top++, -1);
      }
    }
  );
  lj_record_setup(J);
}

/* Stop tracing. */
static void trace_stop(jit_State *J)
{
  BCIns *pc = mref(J->cur.startpc, BCIns);
  BCOp op = bc_op(J->cur.startins);
  GCproto *pt = &gcref(J->cur.startpt)->pt;
  TraceNo traceno = J->cur.traceno;
  GCtrace *T = J->curfinal;
  lua_State *L;

  switch (op) {
  case BC_FORL:
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
    /* fallthrough */
  case BC_LOOP:
  case BC_ITERL:
  case BC_FUNCF:
    /* Patch bytecode of starting instruction in root trace. */
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
    setbc_d(pc, traceno);
  addroot:
    /* Add to root trace chain in prototype. */
    J->cur.nextroot = pt->trace;
    pt->trace = (TraceNo1)traceno;
    break;
  case BC_RET:
  case BC_RET0:
  case BC_RET1:
    *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
    goto addroot;
  case BC_JMP:
    /* Patch exit branch in parent to side trace entry. */
    lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
    lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
    {
      SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
      snap->count = SNAPCOUNT_DONE;
      if (J->cur.topslot > snap->topslot) snap->topslot = J->cur.topslot;
    }
    /* Add to side trace chain in root trace. */
    {
      GCtrace *root = traceref(J, J->cur.root);
      root->nchild++;
      J->cur.nextside = root->nextside;
      root->nextside = (TraceNo1)traceno;
    }
    break;
  case BC_CALLM:
  case BC_CALL:
  case BC_ITERC:
    /* Trace stitching: patch link of previous trace. */
    traceref(J, J->exitno)->link = traceno;
    break;
  default:
    lj_assertJ(0, "bad stop bytecode %d", op);
    break;
  }

  /* Commit new mcode only after all patching is done. */
  lj_mcode_commit(J, J->cur.mcode);
  J->postproc = LJ_POST_NONE;
  trace_save(J, T);

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
  );
}

/* Start a new root trace for down-recursion. */
static int trace_downrec(jit_State *J)
{
  /* Restart recording at the return instruction. */
  lj_assertJ(J->pt != NULL, "no active prototype");
  lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
  if (bc_op(*J->pc) == BC_RETM) {
    J->ntraceabort++;
    return 0;  /* NYI: down-recursion with RETM. */
  }
  J->parent = 0;
  J->exitno = 0;
  J->state = LJ_TRACE_RECORD;
  trace_start(J);
  return 1;
}

/* Abort tracing. */
static int trace_abort(jit_State *J)
{
  lua_State *L = J->L;
  TraceError e = LJ_TRERR_RECERR;
  TraceNo traceno;

  J->postproc = LJ_POST_NONE;
  lj_mcode_abort(J);
  if (J->curfinal) {
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL;
  }
  if (tvisnumber(L->top-1))
    e = (TraceError)numberVint(L->top-1);
  if (e == LJ_TRERR_MCODELM) {
    L->top--;  /* Remove error object */
    J->state = LJ_TRACE_ASM;
    return 1;  /* Retry ASM with new MCode area. */
  }
  /* Penalize or blacklist starting bytecode instruction. */
  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
    if (J->exitno == 0) {
      BCIns *startpc = mref(J->cur.startpc, BCIns);
      if (e == LJ_TRERR_RETRY)
        hotcount_set(J2GG(J), startpc+1, 1);  /* Immediate retry. */
      else
        penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
    } else {
      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
    }
  }

  /* Is there anything to abort? */
  traceno = J->cur.traceno;
  if (traceno) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    J->cur.link = 0;
    J->cur.linktype = LJ_TRLINK_NONE;
    lj_vmevent_send(L, TRACE,
      cTValue *bot = tvref(L->stack)+LJ_FR2;
      cTValue *frame;
      const BCIns *pc;
      BCPos pos = 0;
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
      setintV(L->top++, traceno);
      /* Find original Lua function call to generate a better error message. */
      for (frame = J->L->base-1, pc = J->pc; ; frame = frame_prev(frame)) {
        if (isluafunc(frame_func(frame))) {
          pos = proto_bcpos(funcproto(frame_func(frame)), pc);
          break;
        } else if (frame_prev(frame) <= bot) {
          break;
        } else if (frame_iscont(frame)) {
          pc = frame_contpc(frame) - 1;
        } else {
          pc = frame_pc(frame) - 1;
        }
      }
      setfuncV(L, L->top++, frame_func(frame));
      setintV(L->top++, pos);
      copyTV(L, L->top++, restorestack(L, errobj));
      copyTV(L, L->top++, &J->errinfo);
    );
    /* Drop aborted trace after the vmevent (which may still access it). */
    setgcrefnull(J->trace[traceno]);
    if (traceno < J->freetrace)
      J->freetrace = traceno;
    J->cur.traceno = 0;
  }
  L->top--;  /* Remove error object */
  if (e == LJ_TRERR_DOWNREC)
    return trace_downrec(J);
  else if (e == LJ_TRERR_MCODEAL)
    lj_trace_flushall(L);
  J->ntraceabort++;
  return 0;
}

/* Perform pending re-patch of a bytecode instruction. */
static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
{
  if (LJ_UNLIKELY(J->patchpc)) {
    if (force || J->bcskip == 0) {
      *J->patchpc = J->patchins;
      J->patchpc = NULL;
    } else {
      J->bcskip = 0;
    }
  }
}

/* State machine for the trace compiler. Protected callback. */
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
  jit_State *J = (jit_State *)ud;
  UNUSED(dummy);
  do {
  retry:
    switch (J->state) {
    case LJ_TRACE_START:
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
      trace_start(J);
      lj_dispatch_update(J2G(J));
      break;

    case LJ_TRACE_RECORD:
      trace_pendpatch(J, 0);
      setvmstate(J2G(J), RECORD);
      lj_vmevent_send_(L, RECORD,
        /* Save/restore state for trace recorder. */
        TValue savetv = J2G(J)->tmptv;
        TValue savetv2 = J2G(J)->tmptv2;
        TraceNo parent = J->parent;
        ExitNo exitno = J->exitno;
        setintV(L->top++, J->cur.traceno);
        setfuncV(L, L->top++, J->fn);
        setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
        setintV(L->top++, J->framedepth);
      ,
        J2G(J)->tmptv = savetv;
        J2G(J)->tmptv2 = savetv2;
        J->parent = parent;
        J->exitno = exitno;
      );
      lj_record_ins(J);
      break;

    case LJ_TRACE_END:
      trace_pendpatch(J, 1);
      J->loopref = 0;
      if ((J->flags & JIT_F_OPT_LOOP) &&
          J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
        setvmstate(J2G(J), OPT);
        lj_opt_dce(J);
        if (lj_opt_loop(J)) {  /* Loop optimization failed? */
          J->cur.link = 0;
          J->cur.linktype = LJ_TRLINK_NONE;
          J->loopref = J->cur.nins;
          J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
          break;
        }
        J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
      }
      lj_opt_split(J);
      lj_opt_sink(J);
      if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
      J->state = LJ_TRACE_ASM;
      break;

    case LJ_TRACE_ASM:
      setvmstate(J2G(J), ASM);
      lj_asm_trace(J, &J->cur);
      trace_stop(J);
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;

    default:  /* Trace aborted asynchronously. */
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
      /* fallthrough */
    case LJ_TRACE_ERR:
      trace_pendpatch(J, 1);
      if (trace_abort(J))
        goto retry;
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;
    }
  } while (J->state > LJ_TRACE_RECORD);
  return NULL;
}

/* -- Event handling ------------------------------------------------------ */

/* A bytecode instruction is about to be executed. Record it. */
void lj_trace_ins(jit_State *J, const BCIns *pc)
{
  /* Note: J->L must already be set. pc is the true bytecode PC here. */
  J->pc = pc;
  J->fn = curr_func(J->L);
  J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
  while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
    J->state = LJ_TRACE_ERR;
}

/* A hotcount triggered. Start recording a root trace. */
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
{
  /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
  ERRNO_SAVE
  /* Reset hotcount. */
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Root trace. */
    J->exitno = 0;
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc-1);
  }
  ERRNO_RESTORE
}

/* Check for a hot side exit. If yes, start recording a side trace. */
static void trace_hotside(jit_State *J, const BCIns *pc)
{
  SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
      isluafunc(curr_func(J->L)) &&
      snap->count != SNAPCOUNT_DONE &&
      ++snap->count >= J->param[JIT_P_hotexit]) {
    lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
    /* J->parent is non-zero for a side trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}

/* Stitch a new trace to the previous trace. */
void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
{
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Have to treat it like a root trace. */
    /* J->exitno is set to the invoking trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}


/* Tiny struct to pass data to protected call. */
typedef struct ExitDataCP {
  jit_State *J;
  void *exptr;          /* Pointer to exit state. */
  const BCIns *pc;      /* Restart interpreter at this PC. */
} ExitDataCP;

/* Need to protect lj_snap_restore because it may throw. */
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
  ExitDataCP *exd = (ExitDataCP *)ud;
  /* Always catch error here and don't call error function. */
  cframe_errfunc(L->cframe) = 0;
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
  exd->pc = lj_snap_restore(exd->J, exd->exptr);
  UNUSED(dummy);
  return NULL;
}

/* Need to protect lj_gc_step because it may throw. */
static TValue *trace_exit_gc_cp(lua_State *L, lua_CFunction dummy, void *unused)
{
  /* Always catch error here and don't call error function. */
  cframe_errfunc(L->cframe) = 0;
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
  lj_gc_step(L);
  UNUSED(dummy);
  UNUSED(unused);
  return NULL;
}

#ifndef LUAJIT_DISABLE_VMEVENT
/* Push all registers from exit state. */
static void trace_exit_regs(lua_State *L, ExitState *ex)
{
  int32_t i;
  setintV(L->top++, RID_NUM_GPR);
  setintV(L->top++, RID_NUM_FPR);
  for (i = 0; i < RID_NUM_GPR; i++) {
    if (sizeof(ex->gpr[i]) == sizeof(int32_t))
      setintV(L->top++, (int32_t)ex->gpr[i]);
    else
      setnumV(L->top++, (lua_Number)ex->gpr[i]);
  }
#if !LJ_SOFTFP
  for (i = 0; i < RID_NUM_FPR; i++) {
    setnumV(L->top, ex->fpr[i]);
    if (LJ_UNLIKELY(tvisnan(L->top)))
      setnanV(L->top);
    L->top++;
  }
#endif
}
#endif

#if defined(EXITSTATE_PCREG) || (LJ_UNWIND_JIT && !EXITTRACE_VMSTATE)
/* Determine trace number from pc of exit instruction. */
static TraceNo trace_exit_find(jit_State *J, MCode *pc)
{
  TraceNo traceno;
  for (traceno = 1; traceno < J->sizetrace; traceno++) {
    GCtrace *T = traceref(J, traceno);
    if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
      return traceno;
  }
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif

/* A trace exited. Restore interpreter state. */
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
  ERRNO_SAVE
  lua_State *L = J->L;
  ExitState *ex = (ExitState *)exptr;
  ExitDataCP exd;
  int errcode, exitcode = J->exitcode;
  TValue exiterr;
  const BCIns *pc;
  void *cf;
  GCtrace *T;

  setnilV(&exiterr);
  if (exitcode) {  /* Trace unwound with error code. */
    J->exitcode = 0;
    copyTV(L, &exiterr, L->top-1);
  }

#ifdef EXITSTATE_PCREG
  J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
#endif
  T = traceref(J, J->parent); UNUSED(T);
#ifdef EXITSTATE_CHECKEXIT
  if (J->exitno == T->nsnap) {  /* Treat stack check like a parent exit. */
    lj_assertJ(T->root != 0, "stack check in root trace");
    J->exitno = T->ir[REF_BASE].op2;
    J->parent = T->ir[REF_BASE].op1;
    T = traceref(J, J->parent);
  }
#endif
  lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
  exd.J = J;
  exd.exptr = exptr;
  errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
  if (errcode)
    return -errcode;  /* Return negated error code. */

  if (exitcode) copyTV(L, L->top++, &exiterr);  /* Anchor the error object. */

  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
    lj_vmevent_send(L, TEXIT,
      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
      trace_exit_regs(L, ex);
    );

  pc = exd.pc;
  cf = cframe_raw(L->cframe);
  setcframe_pc(cf, pc);
  if (exitcode) {
    return -exitcode;
  } else if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
    /* Just exit to interpreter. */
  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
    if (!(G(L)->hookmask & HOOK_GC)) {
      /* Exited because of GC: drive GC forward. */
      errcode = lj_vm_cpcall(L, NULL, NULL, trace_exit_gc_cp);
      if (errcode)
        return -errcode;  /* Return negated error code. */
    }
  } else if ((J->flags & JIT_F_ON)) {
    trace_hotside(J, pc);
  }
  if (bc_op(*pc) == BC_JLOOP) {
    BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
    if (bc_isret(bc_op(*retpc))) {
      if (J->state == LJ_TRACE_RECORD) {
        J->patchins = *pc;
        J->patchpc = (BCIns *)pc;
        *J->patchpc = *retpc;
        J->bcskip = 1;
      } else {
        pc = retpc;
        setcframe_pc(cf, pc);
      }
    }
  }
  /* Return MULTRES or 0. */
  ERRNO_RESTORE
  switch (bc_op(*pc)) {
  case BC_CALLM: case BC_CALLMT:
    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
  case BC_RETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
  case BC_TSETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
  default:
    if (bc_op(*pc) >= BC_FUNCF)
      return (int)((BCReg)(L->top - L->base) + 1);
    return 0;
  }
}

#if LJ_UNWIND_JIT
/* Given an mcode address determine trace exit address for unwinding. */
uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep)
{
#if EXITTRACE_VMSTATE
  TraceNo traceno = J2G(J)->vmstate;
#else
  TraceNo traceno = trace_exit_find(J, (MCode *)addr);
#endif
  GCtrace *T = traceref(J, traceno);
  if (T
#if EXITTRACE_VMSTATE
      && addr >= (uintptr_t)T->mcode && addr < (uintptr_t)T->mcode + T->szmcode
#endif
     ) {
    SnapShot *snap = T->snap;
    SnapNo lo = 0, exitno = T->nsnap;
    uintptr_t ofs = (uintptr_t)((MCode *)addr - T->mcode);  /* MCode units! */
    /* Rightmost binary search for mcode offset to determine exit number. */
    do {
      SnapNo mid = (lo+exitno) >> 1;
      if (ofs < snap[mid].mcofs) exitno = mid; else lo = mid + 1;
    } while (lo < exitno);
    exitno--;
    *ep = exitno;
#ifdef EXITSTUBS_PER_GROUP
    return (uintptr_t)exitstub_addr(J, exitno);
#else
    return (uintptr_t)exitstub_trace_addr(T, exitno);
#endif
  }
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif

#endif