tarantool / luajit / 6035198545

31 Aug 2023 08:55AM UTC coverage: 88.225% (+0.4%) from 87.822%

Trigger: push (via github)
Committer: fckxorg
test: don't skip tool CLI flag for tarantool

That skipcond was introduced to work around obstacles in
LuaJIT's integration testing in Tarantool. Since the required
patch has landed in the Tarantool master, the skipcond is no
longer necessary.

Related to tarantool/tarantool#5688

5340 of 5975 branches covered (89.37%)

Branch coverage included in aggregate %.

20495 of 23308 relevant lines covered (87.93%)

1297339.67 hits per line

Source File: /src/lj_snap.c (97.11% lines covered)

/*
** Snapshot handling.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_snap_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_tab.h"
#include "lj_state.h"
#include "lj_frame.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_target.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

/* Emit raw IR without passing through optimizations. */
#define emitir_raw(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))

/* -- Snapshot buffer allocation ------------------------------------------ */

/* Grow snapshot buffer. */
void lj_snap_grow_buf_(jit_State *J, MSize need)
{
  MSize maxsnap = (MSize)J->param[JIT_P_maxsnap];
  if (need > maxsnap)
    lj_trace_err(J, LJ_TRERR_SNAPOV);
  lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot);
  J->cur.snap = J->snapbuf;
}

/* Grow snapshot map buffer. */
void lj_snap_grow_map_(jit_State *J, MSize need)
{
  if (need < 2*J->sizesnapmap)
    need = 2*J->sizesnapmap;
  else if (need < 64)
    need = 64;
  J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf,
                    J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry));
  J->cur.snapmap = J->snapmapbuf;
  J->sizesnapmap = need;
}
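
/* Aside: illustrative sketch, not part of lj_snap.c. The snapshot map
** buffer grows geometrically: at least double the old capacity, with a
** floor of 64 entries, so repeated appends have amortized O(1) cost.
** The same policy as a standalone helper (hypothetical name):
*/
static MSize snapmap_grow_policy(MSize oldsize, MSize need)
{
  if (need < 2*oldsize)
    need = 2*oldsize;  /* At least double the current capacity. */
  else if (need < 64)
    need = 64;         /* First allocation starts at 64 entries. */
  return need;         /* e.g. (0,1) -> 64, (64,65) -> 128 */
}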

/* -- Snapshot generation ------------------------------------------------- */

/* Add all modified slots to the snapshot. */
static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
{
  IRRef retf = J->chain[IR_RETF];  /* Limits SLOAD restore elimination. */
  BCReg s;
  MSize n = 0;
  for (s = 0; s < nslots; s++) {
    TRef tr = J->slot[s];
    IRRef ref = tref_ref(tr);
#if LJ_FR2
    if (s == 1) {  /* Ignore slot 1 in LJ_FR2 mode, except if tailcalled. */
      if ((tr & TREF_FRAME))
        map[n++] = SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL);
      continue;
    }
    if ((tr & (TREF_FRAME | TREF_CONT)) && !ref) {
      cTValue *base = J->L->base - J->baseslot;
      tr = J->slot[s] = (tr & 0xff0000) | lj_ir_k64(J, IR_KNUM, base[s].u64);
      ref = tref_ref(tr);
    }
#endif
    if (ref) {
      SnapEntry sn = SNAP_TR(s, tr);
      IRIns *ir = &J->cur.ir[ref];
      if ((LJ_FR2 || !(sn & (SNAP_CONT|SNAP_FRAME))) &&
          ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
        /* No need to snapshot unmodified non-inherited slots. */
        if (!(ir->op2 & IRSLOAD_INHERIT))
          continue;
        /* No need to restore readonly slots and unmodified non-parent slots. */
        if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) &&
            (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
          sn |= SNAP_NORESTORE;
      }
      if (LJ_SOFTFP32 && irt_isnum(ir->t))
        sn |= SNAP_SOFTFPNUM;
      map[n++] = sn;
    }
  }
  return n;
}
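
/* Aside: illustrative sketch, not part of lj_snap.c. A SnapEntry packs
** slot, flags and IR reference into one 32 bit word; the layout below
** mirrors lj_snap.h for this version and is an assumption here:
** bits 24-31 stack slot, bits 16-23 flags (SNAP_FRAME, SNAP_CONT,
** SNAP_NORESTORE, SNAP_SOFTFPNUM), bits 0-15 the IR reference.
*/
static uint32_t example_snap_pack(uint32_t slot, uint32_t flags, uint32_t ref)
{
  return (slot << 24) + flags + ref;  /* Same shape as SNAP(slot, flags, ref). */
}
static uint32_t example_snap_slot(uint32_t sn) { return sn >> 24; }
static uint32_t example_snap_ref(uint32_t sn) { return sn & 0xffff; }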

/* Add frame links at the end of the snapshot. */
static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot)
{
  cTValue *frame = J->L->base - 1;
  cTValue *lim = J->L->base - J->baseslot + LJ_FR2;
  GCfunc *fn = frame_func(frame);
  cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top;
#if LJ_FR2
  uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2);
  lj_assertJ(2 <= J->baseslot && J->baseslot <= 257, "bad baseslot");
  memcpy(map, &pcbase, sizeof(uint64_t));
#else
  MSize f = 0;
  map[f++] = SNAP_MKPC(J->pc);  /* The current PC is always the first entry. */
#endif
  while (frame > lim) {  /* Backwards traversal of all frames above base. */
    if (frame_islua(frame)) {
#if !LJ_FR2
      map[f++] = SNAP_MKPC(frame_pc(frame));
#endif
      frame = frame_prevl(frame);
    } else if (frame_iscont(frame)) {
#if !LJ_FR2
      map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
      map[f++] = SNAP_MKPC(frame_contpc(frame));
#endif
      frame = frame_prevd(frame);
    } else {
      lj_assertJ(!frame_isc(frame), "broken frame chain");
#if !LJ_FR2
      map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
#endif
      frame = frame_prevd(frame);
      continue;
    }
    if (frame + funcproto(frame_func(frame))->framesize > ftop)
      ftop = frame + funcproto(frame_func(frame))->framesize;
  }
  *topslot = (uint8_t)(ftop - lim);
#if LJ_FR2
  lj_assertJ(sizeof(SnapEntry) * 2 == sizeof(uint64_t), "bad SnapEntry def");
  return 2;
#else
  lj_assertJ(f == (MSize)(1 + J->framedepth), "miscalculated snapshot size");
  return f;
#endif
}

/* Take a snapshot of the current stack. */
static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
{
  BCReg nslots = J->baseslot + J->maxslot;
  MSize nent;
  SnapEntry *p;
  /* Conservative estimate. */
  lj_snap_grow_map(J, nsnapmap + nslots + (MSize)(LJ_FR2?2:J->framedepth+1));
  p = &J->cur.snapmap[nsnapmap];
  nent = snapshot_slots(J, p, nslots);
  snap->nent = (uint8_t)nent;
  nent += snapshot_framelinks(J, p + nent, &snap->topslot);
  snap->mapofs = (uint32_t)nsnapmap;
  snap->ref = (IRRef1)J->cur.nins;
  snap->mcofs = 0;
  snap->nslots = (uint8_t)nslots;
  snap->count = 0;
  J->cur.nsnapmap = (uint32_t)(nsnapmap + nent);
}

/* Add or merge a snapshot. */
void lj_snap_add(jit_State *J)
{
  MSize nsnap = J->cur.nsnap;
  MSize nsnapmap = J->cur.nsnapmap;
  /* Merge if no ins. inbetween or if requested and no guard inbetween. */
  if ((nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins) ||
      (J->mergesnap && !irt_isguard(J->guardemit))) {
    if (nsnap == 1) {  /* But preserve snap #0 PC. */
      emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
      goto nomerge;
    }
    nsnapmap = J->cur.snap[--nsnap].mapofs;
  } else {
  nomerge:
    lj_snap_grow_buf(J, nsnap+1);
    J->cur.nsnap = (uint16_t)(nsnap+1);
  }
  J->mergesnap = 0;
  J->guardemit.irt = 0;
  snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap);
}

195
/* -- Snapshot modification ----------------------------------------------- */
196

197
#define SNAP_USEDEF_SLOTS        (LJ_MAX_JSLOTS+LJ_STACK_EXTRA)
198

199
/* Find unused slots with reaching-definitions bytecode data-flow analysis. */
200
static BCReg snap_usedef(jit_State *J, uint8_t *udf,
201
                         const BCIns *pc, BCReg maxslot)
202
{
203
  BCReg s;
204
  GCobj *o;
205

206
  if (maxslot == 0) return 0;
207
#ifdef LUAJIT_USE_VALGRIND
208
  /* Avoid errors for harmless reads beyond maxslot. */
209
  memset(udf, 1, SNAP_USEDEF_SLOTS);
210
#else
211
  memset(udf, 1, maxslot);
212
#endif
213

214
  /* Treat open upvalues as used. */
215
  o = gcref(J->L->openupval);
216
  while (o) {
217
    if (uvval(gco2uv(o)) < J->L->base) break;
218
    udf[uvval(gco2uv(o)) - J->L->base] = 0;
219
    o = gcref(o->gch.nextgc);
220
  }
221

222
#define USE_SLOT(s)                udf[(s)] &= ~1
223
#define DEF_SLOT(s)                udf[(s)] *= 3
224

225
  /* Scan through following bytecode and check for uses/defs. */
226
  lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
227
             "snapshot PC out of range");
228
  for (;;) {
229
    BCIns ins = *pc++;
230
    BCOp op = bc_op(ins);
231
    switch (bcmode_b(op)) {
232
    case BCMvar: USE_SLOT(bc_b(ins)); break;
233
    default: break;
234
    }
235
    switch (bcmode_c(op)) {
236
    case BCMvar: USE_SLOT(bc_c(ins)); break;
237
    case BCMrbase:
238
      lj_assertJ(op == BC_CAT, "unhandled op %d with RC rbase", op);
239
      for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
240
      for (; s < maxslot; s++) DEF_SLOT(s);
241
      break;
242
    case BCMjump:
243
    handle_jump: {
244
      BCReg minslot = bc_a(ins);
245
      if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT;
246
      else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1;
247
      else if (op == BC_UCLO) { pc += bc_j(ins); break; }
248
      for (s = minslot; s < maxslot; s++) DEF_SLOT(s);
249
      return minslot < maxslot ? minslot : maxslot;
250
      }
251
    case BCMlit:
252
      if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
253
        goto handle_jump;
254
      } else if (bc_isret(op)) {
255
        BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1);
256
        for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
257
        for (; s < top; s++) USE_SLOT(s);
258
        for (; s < maxslot; s++) DEF_SLOT(s);
259
        return 0;
260
      }
261
      break;
262
    case BCMfunc: return maxslot;  /* NYI: will abort, anyway. */
263
    default: break;
264
    }
265
    switch (bcmode_a(op)) {
266
    case BCMvar: USE_SLOT(bc_a(ins)); break;
267
    case BCMdst:
268
       if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins));
269
       break;
270
    case BCMbase:
271
      if (op >= BC_CALLM && op <= BC_ITERN) {
272
        BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
273
                    maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2);
274
        if (LJ_FR2) DEF_SLOT(bc_a(ins)+1);
275
        s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
276
        for (; s < top; s++) USE_SLOT(s);
277
        for (; s < maxslot; s++) DEF_SLOT(s);
278
        if (op == BC_CALLT || op == BC_CALLMT) {
279
          for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
280
          return 0;
281
        }
282
      } else if (op == BC_VARG) {
283
        return maxslot;  /* NYI: punt. */
284
      } else if (op == BC_KNIL) {
285
        for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s);
286
      } else if (op == BC_TSETM) {
287
        for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s);
288
      }
289
      break;
290
    default: break;
291
    }
292
    lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
293
               "use/def analysis PC out of range");
294
  }
295

296
#undef USE_SLOT
297
#undef DEF_SLOT
298

299
  return 0;  /* unreachable */
300
}
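
/* Aside: illustrative sketch, not part of lj_snap.c. The USE_SLOT/
** DEF_SLOT encoding above: every slot starts at 1; a use clears bit 0
** and pins the slot at 0, while a def of a still-unused slot turns 1
** into 3. After the scan, udf[s] != 0 means the slot is redefined
** before ever being read, so its current value is dead and
** lj_snap_purge() below may drop it. Standalone demo:
*/
static void example_usedef(void)
{
  uint8_t udf[3] = {1, 1, 1};
  udf[0] &= ~1;               /* USE first: slot 0 stays live (1 -> 0). */
  udf[1] *= 3;                /* DEF first: slot 1 is dead (1 -> 3). */
  udf[1] &= ~1;               /* A later use doesn't revive it (3 -> 2). */
  udf[2] &= ~1; udf[2] *= 3;  /* USE then DEF: stays 0, live. */
  /* udf is now {0, 2, 0}: only slot 1 may be purged. */
}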

/* Purge dead slots before the next snapshot. */
void lj_snap_purge(jit_State *J)
{
  uint8_t udf[SNAP_USEDEF_SLOTS];
  BCReg s, maxslot = J->maxslot;
  if (bc_op(*J->pc) == BC_FUNCV && maxslot > J->pt->numparams)
    maxslot = J->pt->numparams;
  s = snap_usedef(J, udf, J->pc, maxslot);
  for (; s < maxslot; s++)
    if (udf[s] != 0)
      J->base[s] = 0;  /* Purge dead slots. */
}

/* Shrink last snapshot. */
void lj_snap_shrink(jit_State *J)
{
  SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
  SnapEntry *map = &J->cur.snapmap[snap->mapofs];
  MSize n, m, nlim, nent = snap->nent;
  uint8_t udf[SNAP_USEDEF_SLOTS];
  BCReg maxslot = J->maxslot;
  BCReg baseslot = J->baseslot;
  BCReg minslot = snap_usedef(J, udf, snap_pc(&map[nent]), maxslot);
  maxslot += baseslot;
  minslot += baseslot;
  snap->nslots = (uint8_t)maxslot;
  for (n = m = 0; n < nent; n++) {  /* Remove unused slots from snapshot. */
    BCReg s = snap_slot(map[n]);
    if (s < minslot || (s < maxslot && udf[s-baseslot] == 0))
      map[m++] = map[n];  /* Only copy used slots. */
  }
  snap->nent = (uint8_t)m;
  nlim = J->cur.nsnapmap - snap->mapofs - 1;
  while (n <= nlim) map[m++] = map[n++];  /* Move PC + frame links down. */
  J->cur.nsnapmap = (uint32_t)(snap->mapofs + m);  /* Free up space in map. */
}

/* -- Snapshot access ----------------------------------------------------- */

/* Initialize a Bloom Filter with all renamed refs.
** There are very few renames (often none), so the filter has
** very few bits set. This makes it suitable for negative filtering.
*/
static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim)
{
  BloomFilter rfilt = 0;
  IRIns *ir;
  for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
    if (ir->op2 <= lim)
      bloomset(rfilt, ir->op1);
  return rfilt;
}
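
/* Aside: illustrative sketch, not part of lj_snap.c. BloomFilter (from
** lj_def.h) is a single machine word used as a one-hash Bloom filter:
** bloomset() sets bit (key & BLOOM_MASK), bloomtest() checks it. A clear
** bit proves the key was never added; a set bit may be a false positive
** (keys 64 apart alias on a 64 bit word), which is fine because hits
** fall back to the exact scan in snap_renameref() below. Demo:
*/
static int example_bloom(void)
{
  uintptr_t filt = 0;
  filt |= (uintptr_t)1 << (100 & (8*sizeof(uintptr_t)-1));  /* add ref 100 */
  return (filt & ((uintptr_t)1 << (101 & (8*sizeof(uintptr_t)-1)))) == 0;
  /* returns 1: ref 101 definitely was not added. */
}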

/* Process matching renames to find the original RegSP. */
static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
{
  IRIns *ir;
  for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
    if (ir->op1 == ref && ir->op2 <= lim)
      rs = ir->prev;
  return rs;
}

/* Copy RegSP from parent snapshot to the parent links of the IR. */
IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, IRIns *ir)
{
  SnapShot *snap = &T->snap[snapno];
  SnapEntry *map = &T->snapmap[snap->mapofs];
  BloomFilter rfilt = snap_renamefilter(T, snapno);
  MSize n = 0;
  IRRef ref = 0;
  UNUSED(J);
  for ( ; ; ir++) {
    uint32_t rs;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & IRSLOAD_PARENT)) break;
      for ( ; ; n++) {
        lj_assertJ(n < snap->nent, "slot %d not found in snapshot", ir->op1);
        if (snap_slot(map[n]) == ir->op1) {
          ref = snap_ref(map[n++]);
          break;
        }
      }
    } else if (LJ_SOFTFP32 && ir->o == IR_HIOP) {
      ref++;
    } else if (ir->o == IR_PVAL) {
      ref = ir->op1 + REF_BIAS;
    } else {
      break;
    }
    rs = T->ir[ref].prev;
    if (bloomtest(rfilt, ref))
      rs = snap_renameref(T, snapno, ref, rs);
    ir->prev = (uint16_t)rs;
    lj_assertJ(regsp_used(rs), "unused IR %04d in snapshot", ref - REF_BIAS);
  }
  return ir;
}

/* -- Snapshot replay ----------------------------------------------------- */

/* Replay constant from parent trace. */
static TRef snap_replay_const(jit_State *J, IRIns *ir)
{
  /* Only have to deal with constants that can occur in stack slots. */
  switch ((IROp)ir->o) {
  case IR_KPRI: return TREF_PRI(irt_type(ir->t));
  case IR_KINT: return lj_ir_kint(J, ir->i);
  case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t));
  case IR_KNUM: case IR_KINT64:
    return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64);
  case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir));  /* Continuation. */
  default: lj_assertJ(0, "bad IR constant op %d", ir->o); return TREF_NIL;
  }
}

/* De-duplicate parent reference. */
static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref)
{
  MSize j;
  for (j = 0; j < nmax; j++)
    if (snap_ref(map[j]) == ref)
      return J->slot[snap_slot(map[j])] & ~(SNAP_CONT|SNAP_FRAME);
  return 0;
}

/* Emit parent reference with de-duplication. */
static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
                      BloomFilter seen, IRRef ref)
{
  IRIns *ir = &T->ir[ref];
  TRef tr;
  if (irref_isk(ref))
    tr = snap_replay_const(J, ir);
  else if (!regsp_used(ir->prev))
    tr = 0;
  else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0)
    tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0);
  return tr;
}

/* Check whether a sunk store corresponds to an allocation. Slow path. */
static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
{
  if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
      irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
    IRIns *irk = &T->ir[irs->op1];
    if (irk->o == IR_AREF || irk->o == IR_HREFK)
      irk = &T->ir[irk->op1];
    return (&T->ir[irk->op1] == ira);
  }
  return 0;
}

/* Check whether a sunk store corresponds to an allocation. Fast path. */
static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
{
  if (irs->s != 255)
    return (ira + irs->s == irs);  /* Fast check. */
  return snap_sunk_store2(T, ira, irs);
}
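
/* Aside: illustrative sketch, not part of lj_snap.c. snap_sunk_store()
** relies on a one-byte cached distance from the store back to its
** allocation (stored in irs->s, presumably by the sink optimization;
** an assumption here), with 255 as an "out of range" sentinel that
** forces the exact slow-path walk. The pattern in isolation:
*/
typedef struct ExampleIns { uint8_t s; } ExampleIns;

static int example_fast_match(const ExampleIns *alloc, const ExampleIns *store)
{
  if (store->s != 255)
    return (alloc + store->s == store);  /* O(1) pointer-distance check. */
  return -1;  /* Sentinel hit: caller must run the exact slow check. */
}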

/* Replay snapshot state to setup side trace. */
void lj_snap_replay(jit_State *J, GCtrace *T)
{
  SnapShot *snap = &T->snap[J->exitno];
  SnapEntry *map = &T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  BloomFilter seen = 0;
  int pass23 = 0;
  J->framedepth = 0;
  /* Emit IR for slots inherited from parent snapshot. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    IRRef ref = snap_ref(sn);
    IRIns *ir = &T->ir[ref];
    TRef tr;
    /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
    if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0)
      goto setslot;
    bloomset(seen, ref);
    if (irref_isk(ref)) {
      /* See special treatment of LJ_FR2 slot 1 in snapshot_slots() above. */
      if (LJ_FR2 && (sn == SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)))
        tr = 0;
      else
        tr = snap_replay_const(J, ir);
    } else if (!regsp_used(ir->prev)) {
      pass23 = 1;
      lj_assertJ(s != 0, "unused slot 0 in snapshot");
      tr = s;
    } else {
      IRType t = irt_type(ir->t);
      uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
      if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
      if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
      tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
    }
  setslot:
    J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME));  /* Same as TREF_* flags. */
    J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && (s != LJ_FR2));
    if ((sn & SNAP_FRAME))
      J->baseslot = s+1;
  }
  if (pass23) {
    IRIns *irlast = &T->ir[snap->ref];
    pass23 = 0;
    /* Emit dependent PVALs. */
    for (n = 0; n < nent; n++) {
      SnapEntry sn = map[n];
      IRRef refp = snap_ref(sn);
      IRIns *ir = &T->ir[refp];
      if (regsp_reg(ir->r) == RID_SUNK) {
        if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;
        pass23 = 1;
        lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
                   ir->o == IR_CNEW || ir->o == IR_CNEWI,
                   "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
        if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
        if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
        if (LJ_HASFFI && ir->o == IR_CNEWI) {
          if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP)
            snap_pref(J, T, map, nent, seen, (ir+1)->op2);
        } else {
          IRIns *irs;
          for (irs = ir+1; irs < irlast; irs++)
            if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
              if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
                snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
              else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
                       irs+1 < irlast && (irs+1)->o == IR_HIOP)
                snap_pref(J, T, map, nent, seen, (irs+1)->op2);
            }
        }
      } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
        lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                   "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
        J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
      }
    }
    /* Replay sunk instructions. */
    for (n = 0; pass23 && n < nent; n++) {
      SnapEntry sn = map[n];
      IRRef refp = snap_ref(sn);
      IRIns *ir = &T->ir[refp];
      if (regsp_reg(ir->r) == RID_SUNK) {
        TRef op1, op2;
        if (J->slot[snap_slot(sn)] != snap_slot(sn)) {  /* De-dup allocs. */
          J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]];
          continue;
        }
        op1 = ir->op1;
        if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1);
        op2 = ir->op2;
        if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2);
        if (LJ_HASFFI && ir->o == IR_CNEWI) {
          if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) {
            lj_needsplit(J);  /* Emit joining HIOP. */
            op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
                             snap_pref(J, T, map, nent, seen, (ir+1)->op2));
          }
          J->slot[snap_slot(sn)] = emitir(ir->ot & ~(IRT_MARK|IRT_ISPHI), op1, op2);
        } else {
          IRIns *irs;
          TRef tr = emitir(ir->ot, op1, op2);
          J->slot[snap_slot(sn)] = tr;
          for (irs = ir+1; irs < irlast; irs++)
            if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
              IRIns *irr = &T->ir[irs->op1];
              TRef val, key = irr->op2, tmp = tr;
              if (irr->o != IR_FREF) {
                IRIns *irk = &T->ir[key];
                if (irr->o == IR_HREFK)
                  key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]),
                                    irk->op2);
                else
                  key = snap_replay_const(J, irk);
                if (irr->o == IR_HREFK || irr->o == IR_AREF) {
                  IRIns *irf = &T->ir[irr->op1];
                  tmp = emitir(irf->ot, tmp, irf->op2);
                }
              }
              tmp = emitir(irr->ot, tmp, key);
              val = snap_pref(J, T, map, nent, seen, irs->op2);
              if (val == 0) {
                IRIns *irc = &T->ir[irs->op2];
                lj_assertJ(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT,
                           "sunk store for parent IR %04d with bad op %d",
                           refp - REF_BIAS, irc->o);
                val = snap_pref(J, T, map, nent, seen, irc->op1);
                val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
              } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
                         irs+1 < irlast && (irs+1)->o == IR_HIOP) {
                IRType t = IRT_I64;
                if (LJ_SOFTFP32 && irt_type((irs+1)->t) == IRT_SOFTFP)
                  t = IRT_NUM;
                lj_needsplit(J);
                if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
                  uint64_t k = (uint32_t)T->ir[irs->op2].i +
                               ((uint64_t)T->ir[(irs+1)->op2].i << 32);
                  val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM, k);
                } else {
                  val = emitir_raw(IRT(IR_HIOP, t), val,
                          snap_pref(J, T, map, nent, seen, (irs+1)->op2));
                }
                tmp = emitir(IRT(irs->o, t), tmp, val);
                continue;
              }
              tmp = emitir(irs->ot, tmp, val);
            } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) {
              emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
            }
        }
      }
    }
  }
  J->base = J->slot + J->baseslot;
  J->maxslot = snap->nslots - J->baseslot;
  lj_snap_add(J);
  if (pass23)  /* Need explicit GC step _after_ initial snapshot. */
    emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0);
}

626
/* -- Snapshot restore ---------------------------------------------------- */
627

628
static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
629
                        SnapNo snapno, BloomFilter rfilt,
630
                        IRIns *ir, TValue *o);
631

632
/* Restore a value from the trace exit state. */
633
static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
130,501✔
634
                            SnapNo snapno, BloomFilter rfilt,
635
                            IRRef ref, TValue *o)
636
{
637
  IRIns *ir = &T->ir[ref];
130,581✔
638
  IRType1 t = ir->t;
130,581✔
639
  RegSP rs = ir->prev;
130,581✔
640
  if (irref_isk(ref)) {  /* Restore constant slot. */
130,581✔
641
    if (ir->o == IR_KPTR) {
78,848✔
642
      o->u64 = (uint64_t)(uintptr_t)ir_kptr(ir);
×
643
    } else {
644
      lj_assertJ(!(ir->o == IR_KKPTR || ir->o == IR_KNULL),
78,848✔
645
                 "restore of const from IR %04d with bad op %d",
646
                 ref - REF_BIAS, ir->o);
647
      lj_ir_kvalue(J->L, o, ir);
78,848✔
648
    }
649
    return;
78,848✔
650
  }
651
  if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
51,733✔
652
    rs = snap_renameref(T, snapno, ref, rs);
1,026✔
653
  if (ra_hasspill(regsp_spill(rs))) {  /* Restore from spill slot. */
51,733✔
654
    int32_t *sps = &ex->spill[regsp_spill(rs)];
2,294✔
655
    if (irt_isinteger(t)) {
2,294✔
656
      setintV(o, *sps);
314✔
657
#if !LJ_SOFTFP32
658
    } else if (irt_isnum(t)) {
1,980✔
659
      o->u64 = *(uint64_t *)sps;
1,273✔
660
#endif
661
#if LJ_64 && !LJ_GC64
662
    } else if (irt_islightud(t)) {
663
      /* 64 bit lightuserdata which may escape already has the tag bits. */
664
      o->u64 = *(uint64_t *)sps;
665
#endif
666
    } else {
667
      lj_assertJ(!irt_ispri(t), "PRI ref with spill slot");
707✔
668
      setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t));
707✔
669
    }
670
  } else {  /* Restore from register. */
671
    Reg r = regsp_reg(rs);
49,439✔
672
    if (ra_noreg(r)) {
49,439✔
673
      lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
80✔
674
                 "restore from IR %04d has no reg", ref - REF_BIAS);
675
      snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o);
80✔
676
      if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o));
677
      return;
80✔
678
    } else if (irt_isinteger(t)) {
49,359✔
679
      setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
3,450✔
680
#if !LJ_SOFTFP
681
    } else if (irt_isnum(t)) {
45,909✔
682
      setnumV(o, ex->fpr[r-RID_MIN_FPR]);
14,412✔
683
#elif LJ_64  /* && LJ_SOFTFP */
684
    } else if (irt_isnum(t)) {
685
      o->u64 = ex->gpr[r-RID_MIN_GPR];
686
#endif
687
#if LJ_64 && !LJ_GC64
688
    } else if (irt_is64(t)) {
689
      /* 64 bit values that already have the tag bits. */
690
      o->u64 = ex->gpr[r-RID_MIN_GPR];
691
#endif
692
    } else if (irt_ispri(t)) {
31,497✔
693
      setpriV(o, irt_toitype(t));
×
694
    } else {
695
      setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t));
31,497✔
696
    }
697
  }
698
}
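
/* Aside: illustrative sketch, not part of lj_snap.c. A RegSP records
** where a value lives at a trace exit: a register and/or a spill slot.
** The packing below mirrors the regsp_reg()/regsp_spill() accessors used
** above (low byte = register id, upper bits = spill slot number, as
** defined in lj_target.h; treat the exact widths as an assumption):
*/
static uint32_t example_regsp(uint32_t reg, uint32_t spill)
{
  return reg + (spill << 8);
}
static uint32_t example_regsp_reg(uint32_t rs)   { return rs & 255; }
static uint32_t example_regsp_spill(uint32_t rs) { return rs >> 8; }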

#if LJ_HASFFI
/* Restore raw data from the trace exit state. */
static void snap_restoredata(jit_State *J, GCtrace *T, ExitState *ex,
                             SnapNo snapno, BloomFilter rfilt,
                             IRRef ref, void *dst, CTSize sz)
{
  IRIns *ir = &T->ir[ref];
  RegSP rs = ir->prev;
  int32_t *src;
  uint64_t tmp;
  UNUSED(J);
  if (irref_isk(ref)) {
    if (ir_isk64(ir)) {
      src = (int32_t *)&ir[1];
    } else if (sz == 8) {
      tmp = (uint64_t)(uint32_t)ir->i;
      src = (int32_t *)&tmp;
    } else {
      src = &ir->i;
    }
  } else {
    if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
      rs = snap_renameref(T, snapno, ref, rs);
    if (ra_hasspill(regsp_spill(rs))) {
      src = &ex->spill[regsp_spill(rs)];
      if (sz == 8 && !irt_is64(ir->t)) {
        tmp = (uint64_t)(uint32_t)*src;
        src = (int32_t *)&tmp;
      }
    } else {
      Reg r = regsp_reg(rs);
      if (ra_noreg(r)) {
        /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */
        lj_assertJ(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                   "restore from IR %04d has no reg", ref - REF_BIAS);
        snap_restoredata(J, T, ex, snapno, rfilt, ir->op1, dst, 4);
        *(lua_Number *)dst = (lua_Number)*(int32_t *)dst;
        return;
      }
      src = (int32_t *)&ex->gpr[r-RID_MIN_GPR];
#if !LJ_SOFTFP
      if (r >= RID_MAX_GPR) {
        src = (int32_t *)&ex->fpr[r-RID_MIN_FPR];
#if LJ_TARGET_PPC
        if (sz == 4) {  /* PPC FPRs are always doubles. */
          *(float *)dst = (float)*(double *)src;
          return;
        }
#else
        if (LJ_BE && sz == 4) src++;
#endif
      } else
#endif
      if (LJ_64 && LJ_BE && sz == 4) src++;
    }
  }
  lj_assertJ(sz == 1 || sz == 2 || sz == 4 || sz == 8,
             "restore from IR %04d with bad size %d", ref - REF_BIAS, sz);
  if (sz == 4) *(int32_t *)dst = *src;
  else if (sz == 8) *(int64_t *)dst = *(int64_t *)src;
  else if (sz == 1) *(int8_t *)dst = (int8_t)*src;
  else *(int16_t *)dst = (int16_t)*src;
}
#endif

765
/* Unsink allocation from the trace exit state. Unsink sunk stores. */
766
static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
767
                        SnapNo snapno, BloomFilter rfilt,
768
                        IRIns *ir, TValue *o)
769
{
770
  lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
771
             ir->o == IR_CNEW || ir->o == IR_CNEWI,
772
             "sunk allocation with bad op %d", ir->o);
773
#if LJ_HASFFI
774
  if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
775
    CTState *cts = ctype_cts(J->L);
776
    CTypeID id = (CTypeID)T->ir[ir->op1].i;
777
    CTSize sz;
778
    CTInfo info = lj_ctype_info(cts, id, &sz);
779
    GCcdata *cd = lj_cdata_newx(cts, id, sz, info);
780
    setcdataV(J->L, o, cd);
781
    if (ir->o == IR_CNEWI) {
782
      uint8_t *p = (uint8_t *)cdataptr(cd);
783
      lj_assertJ(sz == 4 || sz == 8, "sunk cdata with bad size %d", sz);
784
      if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) {
785
        snap_restoredata(J, T, ex, snapno, rfilt, (ir+1)->op2,
786
                         LJ_LE ? p+4 : p, 4);
787
        if (LJ_BE) p += 4;
788
        sz = 4;
789
      }
790
      snap_restoredata(J, T, ex, snapno, rfilt, ir->op2, p, sz);
791
    } else {
792
      IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
793
      for (irs = ir+1; irs < irlast; irs++)
794
        if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
795
          IRIns *iro = &T->ir[T->ir[irs->op1].op2];
796
          uint8_t *p = (uint8_t *)cd;
797
          CTSize szs;
798
          lj_assertJ(irs->o == IR_XSTORE, "sunk store with bad op %d", irs->o);
799
          lj_assertJ(T->ir[irs->op1].o == IR_ADD,
800
                     "sunk store with bad add op %d", T->ir[irs->op1].o);
801
          lj_assertJ(iro->o == IR_KINT || iro->o == IR_KINT64,
802
                     "sunk store with bad const offset op %d", iro->o);
803
          if (irt_is64(irs->t)) szs = 8;
804
          else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1;
805
          else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2;
806
          else szs = 4;
807
          if (LJ_64 && iro->o == IR_KINT64)
808
            p += (int64_t)ir_k64(iro)->u64;
809
          else
810
            p += iro->i;
811
          lj_assertJ(p >= (uint8_t *)cdataptr(cd) &&
812
                     p + szs <= (uint8_t *)cdataptr(cd) + sz,
813
                     "sunk store with offset out of range");
814
          if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
815
            lj_assertJ(szs == 4, "sunk store with bad size %d", szs);
816
            snap_restoredata(J, T, ex, snapno, rfilt, (irs+1)->op2,
817
                             LJ_LE ? p+4 : p, 4);
818
            if (LJ_BE) p += 4;
819
          }
820
          snap_restoredata(J, T, ex, snapno, rfilt, irs->op2, p, szs);
821
        }
822
    }
823
  } else
824
#endif
825
  {
826
    IRIns *irs, *irlast;
827
    GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) :
828
                                  lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1]));
829
    settabV(J->L, o, t);
830
    irlast = &T->ir[T->snap[snapno].ref];
831
    for (irs = ir+1; irs < irlast; irs++)
832
      if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
833
        IRIns *irk = &T->ir[irs->op1];
834
        TValue tmp, *val;
835
        lj_assertJ(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
836
                   irs->o == IR_FSTORE,
837
                   "sunk store with bad op %d", irs->o);
838
        if (irk->o == IR_FREF) {
839
          lj_assertJ(irk->op2 == IRFL_TAB_META,
840
                     "sunk store with bad field %d", irk->op2);
841
          snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp);
842
          /* NOBARRIER: The table is new (marked white). */
843
          setgcref(t->metatable, obj2gco(tabV(&tmp)));
844
        } else {
845
          irk = &T->ir[irk->op2];
846
          if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1];
847
          lj_ir_kvalue(J->L, &tmp, irk);
848
          val = lj_tab_set(J->L, t, &tmp);
849
          /* NOBARRIER: The table is new (marked white). */
850
          snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val);
851
          if (LJ_SOFTFP32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
852
            snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp);
853
            val->u32.hi = tmp.u32.lo;
854
          }
855
        }
856
      }
857
  }
858
}
859

860
/* Restore interpreter state from exit state with the help of a snapshot. */
861
const BCIns *lj_snap_restore(jit_State *J, void *exptr)
29,831✔
862
{
863
  ExitState *ex = (ExitState *)exptr;
29,831✔
864
  SnapNo snapno = J->exitno;  /* For now, snapno == exitno. */
29,831✔
865
  GCtrace *T = traceref(J, J->parent);
29,831✔
866
  SnapShot *snap = &T->snap[snapno];
29,831✔
867
  MSize n, nent = snap->nent;
29,831✔
868
  SnapEntry *map = &T->snapmap[snap->mapofs];
29,831✔
869
#if !LJ_FR2 || defined(LUA_USE_ASSERT)
870
  SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1-LJ_FR2];
871
#endif
872
#if !LJ_FR2
873
  ptrdiff_t ftsz0;
874
#endif
875
  TValue *frame;
29,831✔
876
  BloomFilter rfilt = snap_renamefilter(T, snapno);
29,831✔
877
  const BCIns *pc = snap_pc(&map[nent]);
29,831✔
878
  lua_State *L = J->L;
29,831✔
879

880
  /* Set interpreter PC to the next PC to get correct error messages. */
881
  setcframe_pc(cframe_raw(L->cframe), pc+1);
29,831✔
882

883
  /* Make sure the stack is big enough for the slots from the snapshot. */
884
  if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) {
29,831✔
885
    L->top = curr_topL(L);
14✔
886
    lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize);
14✔
887
  }
888

889
  /* Fill stack slots with data from the registers and spill slots. */
890
  frame = L->base-1-LJ_FR2;
29,830✔
891
#if !LJ_FR2
892
  ftsz0 = frame_ftsz(frame);  /* Preserve link to previous frame in slot #0. */
893
#endif
894
  for (n = 0; n < nent; n++) {
166,040✔
895
    SnapEntry sn = map[n];
136,210✔
896
    if (!(sn & SNAP_NORESTORE)) {
136,210✔
897
      TValue *o = &frame[snap_slot(sn)];
130,936✔
898
      IRRef ref = snap_ref(sn);
130,936✔
899
      IRIns *ir = &T->ir[ref];
130,936✔
900
      if (ir->r == RID_SUNK) {
130,936✔
901
        MSize j;
902
        for (j = 0; j < n; j++)
1,791✔
903
          if (snap_ref(map[j]) == ref) {  /* De-duplicate sunk allocations. */
1,154✔
904
            copyTV(L, o, &frame[snap_slot(map[j])]);
21✔
905
            goto dupslot;
21✔
906
          }
907
        snap_unsink(J, T, ex, snapno, rfilt, ir, o);
637✔
908
      dupslot:
658✔
909
        continue;
658✔
910
      }
911
      snap_restoreval(J, T, ex, snapno, rfilt, ref, o);
130,278✔
912
      if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM) && tvisint(o)) {
130,278✔
913
        TValue tmp;
914
        snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp);
915
        o->u32.hi = tmp.u32.lo;
916
#if !LJ_FR2
917
      } else if ((sn & (SNAP_CONT|SNAP_FRAME))) {
918
        /* Overwrite tag with frame link. */
919
        setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0);
920
        L->base = o+1;
921
#endif
922
      }
923
    }
924
  }
925
#if LJ_FR2
926
  L->base += (map[nent+LJ_BE] & 0xff);
29,830✔
927
#endif
928
  lj_assertJ(map + nent == flinks, "inconsistent frames in snapshot");
29,830✔
929

930
  /* Compute current stack top. */
931
  switch (bc_op(*pc)) {
29,830✔
932
  default:
29,722✔
933
    if (bc_op(*pc) < BC_FUNCF) {
29,722✔
934
      L->top = curr_topL(L);
29,719✔
935
      break;
29,719✔
936
    }
937
    /* fallthrough */
938
  case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
939
    L->top = frame + snap->nslots;
111✔
940
    break;
111✔
941
  }
942
  J->nsnaprestore++;
29,830✔
943
  return pc;
29,830✔
944
}
945

946
#undef emitir_raw
947
#undef emitir
948

949
#endif