tarantool / luajit, build 6851160821 (push via github, committed by igormunkin)

13 Nov 2023 02:06PM UTC coverage: 88.458% (-0.008%) from 88.466%

Fix last commit.

Reported by PluMGMK.

(cherry-picked from commit 224129a8e)

The `_VMEVENTS` table, where the error handler for GC finalizers
is set, was not cleared from the stack after the initialization.
This commit adds stack cleanup.

Maxim Kokryashkin:
* added the description and the test for the problem

Part of tarantool/tarantool#9145

Reviewed-by: Sergey Kaplun <skaplun@tarantool.org>
Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Signed-off-by: Igor Munkin <imun@tarantool.org>
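
For illustration, here is a minimal sketch of the stack-balance pattern the fix restores, written against the public Lua C API. The `_VMEVENTS` name comes from the commit message; the helper and the handler slot used here are hypothetical, so this is a sketch of the idea, not the actual patch:

#include "lua.h"

/* Hypothetical setup routine mirroring the reported bug. */
static void install_finalizer_errh(lua_State *L, lua_CFunction errh)
{
  lua_getfield(L, LUA_REGISTRYINDEX, "_VMEVENTS");  /* Pushes the table. */
  lua_pushcfunction(L, errh);
  lua_rawseti(L, -2, 0);  /* Hypothetical slot; pops the handler. */
  lua_pop(L, 1);  /* The fix: also pop _VMEVENTS so setup leaves the stack balanced. */
}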

5361 of 5976 branches covered (89.71%)

Branch coverage included in aggregate %.

1 of 1 new or added line in 1 file covered. (100.0%)

33 existing lines in 8 files now uncovered.

20551 of 23317 relevant lines covered (88.14%)

2746324.23 hits per line
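
Since branch coverage is folded into the headline figure, the aggregate works out as (20551 + 5361) / (23317 + 5976) = 25912 / 29293 = 88.458%, matching the coverage reported above.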

Source file: /src/lj_snap.c (96.7% covered)
/*
** Snapshot handling.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_snap_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_tab.h"
#include "lj_state.h"
#include "lj_frame.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_target.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

/* Emit raw IR without passing through optimizations. */
#define emitir_raw(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))

/* -- Snapshot buffer allocation ------------------------------------------ */

/* Grow snapshot buffer. */
void lj_snap_grow_buf_(jit_State *J, MSize need)
{
  MSize maxsnap = (MSize)J->param[JIT_P_maxsnap];
  if (need > maxsnap)
    lj_trace_err(J, LJ_TRERR_SNAPOV);
  lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot);
  J->cur.snap = J->snapbuf;
}

/* Grow snapshot map buffer. */
void lj_snap_grow_map_(jit_State *J, MSize need)
{
  if (need < 2*J->sizesnapmap)
    need = 2*J->sizesnapmap;
  else if (need < 64)
    need = 64;
  J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf,
                    J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry));
  J->cur.snapmap = J->snapmapbuf;
  J->sizesnapmap = need;
}

/* -- Snapshot generation ------------------------------------------------- */

/* Add all modified slots to the snapshot. */
static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
{
  IRRef retf = J->chain[IR_RETF];  /* Limits SLOAD restore elimination. */
  BCReg s;
  MSize n = 0;
  for (s = 0; s < nslots; s++) {
    TRef tr = J->slot[s];
    IRRef ref = tref_ref(tr);
#if LJ_FR2
    if (s == 1) {  /* Ignore slot 1 in LJ_FR2 mode, except if tailcalled. */
      if ((tr & TREF_FRAME))
        map[n++] = SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL);
      continue;
    }
    if ((tr & (TREF_FRAME | TREF_CONT)) && !ref) {
      cTValue *base = J->L->base - J->baseslot;
      tr = J->slot[s] = (tr & 0xff0000) | lj_ir_k64(J, IR_KNUM, base[s].u64);
      ref = tref_ref(tr);
    }
#endif
    if (ref) {
      SnapEntry sn = SNAP_TR(s, tr);
      IRIns *ir = &J->cur.ir[ref];
      if ((LJ_FR2 || !(sn & (SNAP_CONT|SNAP_FRAME))) &&
          ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
        /* No need to snapshot unmodified non-inherited slots. */
        if (!(ir->op2 & IRSLOAD_INHERIT))
          continue;
        /* No need to restore readonly slots and unmodified non-parent slots. */
        if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) &&
            (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
          sn |= SNAP_NORESTORE;
      }
      if (LJ_SOFTFP32 && irt_isnum(ir->t))
        sn |= SNAP_SOFTFPNUM;
      map[n++] = sn;
    }
  }
  return n;
}

/* Add frame links at the end of the snapshot. */
static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot)
{
  cTValue *frame = J->L->base - 1;
  cTValue *lim = J->L->base - J->baseslot + LJ_FR2;
  GCfunc *fn = frame_func(frame);
  cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top;
#if LJ_FR2
  uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2);
  lj_assertJ(2 <= J->baseslot && J->baseslot <= 257, "bad baseslot");
  memcpy(map, &pcbase, sizeof(uint64_t));
#else
  MSize f = 0;
  map[f++] = SNAP_MKPC(J->pc);  /* The current PC is always the first entry. */
#endif
  while (frame > lim) {  /* Backwards traversal of all frames above base. */
    if (frame_islua(frame)) {
#if !LJ_FR2
      map[f++] = SNAP_MKPC(frame_pc(frame));
#endif
      frame = frame_prevl(frame);
    } else if (frame_iscont(frame)) {
#if !LJ_FR2
      map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
      map[f++] = SNAP_MKPC(frame_contpc(frame));
#endif
      frame = frame_prevd(frame);
    } else {
      lj_assertJ(!frame_isc(frame), "broken frame chain");
#if !LJ_FR2
      map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
#endif
      frame = frame_prevd(frame);
      continue;
    }
    if (frame + funcproto(frame_func(frame))->framesize > ftop)
      ftop = frame + funcproto(frame_func(frame))->framesize;
  }
  *topslot = (uint8_t)(ftop - lim);
#if LJ_FR2
  lj_assertJ(sizeof(SnapEntry) * 2 == sizeof(uint64_t), "bad SnapEntry def");
  return 2;
#else
  lj_assertJ(f == (MSize)(1 + J->framedepth), "miscalculated snapshot size");
  return f;
#endif
}

/* Take a snapshot of the current stack. */
static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
{
  BCReg nslots = J->baseslot + J->maxslot;
  MSize nent;
  SnapEntry *p;
  /* Conservative estimate. */
  lj_snap_grow_map(J, nsnapmap + nslots + (MSize)(LJ_FR2?2:J->framedepth+1));
  p = &J->cur.snapmap[nsnapmap];
  nent = snapshot_slots(J, p, nslots);
  snap->nent = (uint8_t)nent;
  nent += snapshot_framelinks(J, p + nent, &snap->topslot);
  snap->mapofs = (uint32_t)nsnapmap;
  snap->ref = (IRRef1)J->cur.nins;
  snap->mcofs = 0;
  snap->nslots = (uint8_t)nslots;
  snap->count = 0;
  J->cur.nsnapmap = (uint32_t)(nsnapmap + nent);
}

/* Add or merge a snapshot. */
void lj_snap_add(jit_State *J)
{
  MSize nsnap = J->cur.nsnap;
  MSize nsnapmap = J->cur.nsnapmap;
  /* Merge if no ins. inbetween or if requested and no guard inbetween. */
  if ((nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins) ||
      (J->mergesnap && !irt_isguard(J->guardemit))) {
    if (nsnap == 1) {  /* But preserve snap #0 PC. */
      emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
      goto nomerge;
    }
    nsnapmap = J->cur.snap[--nsnap].mapofs;
  } else {
  nomerge:
    lj_snap_grow_buf(J, nsnap+1);
    J->cur.nsnap = (uint16_t)(nsnap+1);
  }
  J->mergesnap = 0;
  J->guardemit.irt = 0;
  snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap);
}

/* -- Snapshot modification ----------------------------------------------- */

#define SNAP_USEDEF_SLOTS        (LJ_MAX_JSLOTS+LJ_STACK_EXTRA)

/* Find unused slots with reaching-definitions bytecode data-flow analysis. */
static BCReg snap_usedef(jit_State *J, uint8_t *udf,
                         const BCIns *pc, BCReg maxslot)
{
  BCReg s;
  GCobj *o;

  if (maxslot == 0) return 0;
#ifdef LUAJIT_USE_VALGRIND
  /* Avoid errors for harmless reads beyond maxslot. */
  memset(udf, 1, SNAP_USEDEF_SLOTS);
#else
  memset(udf, 1, maxslot);
#endif

  /* Treat open upvalues as used. */
  o = gcref(J->L->openupval);
  while (o) {
    if (uvval(gco2uv(o)) < J->L->base) break;
    udf[uvval(gco2uv(o)) - J->L->base] = 0;
    o = gcref(o->gch.nextgc);
  }

#define USE_SLOT(s)                udf[(s)] &= ~1
#define DEF_SLOT(s)                udf[(s)] *= 3

  /* Scan through following bytecode and check for uses/defs. */
  lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
             "snapshot PC out of range");
  for (;;) {
    BCIns ins = *pc++;
    BCOp op = bc_op(ins);
    switch (bcmode_b(op)) {
    case BCMvar: USE_SLOT(bc_b(ins)); break;
    default: break;
    }
    switch (bcmode_c(op)) {
    case BCMvar: USE_SLOT(bc_c(ins)); break;
    case BCMrbase:
      lj_assertJ(op == BC_CAT, "unhandled op %d with RC rbase", op);
      for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
      for (; s < maxslot; s++) DEF_SLOT(s);
      break;
    case BCMjump:
    handle_jump: {
      BCReg minslot = bc_a(ins);
      if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT;
      else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1;
      else if (op == BC_UCLO) { pc += bc_j(ins); break; }
      for (s = minslot; s < maxslot; s++) DEF_SLOT(s);
      return minslot < maxslot ? minslot : maxslot;
      }
    case BCMlit:
      if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
        goto handle_jump;
      } else if (bc_isret(op)) {
        BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1);
        for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
        for (; s < top; s++) USE_SLOT(s);
        for (; s < maxslot; s++) DEF_SLOT(s);
        return 0;
      }
      break;
    case BCMfunc: return maxslot;  /* NYI: will abort, anyway. */
    default: break;
    }
    switch (bcmode_a(op)) {
    case BCMvar: USE_SLOT(bc_a(ins)); break;
    case BCMdst:
       if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins));
       break;
    case BCMbase:
      if (op >= BC_CALLM && op <= BC_ITERN) {
        BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
                    maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2);
        if (LJ_FR2) DEF_SLOT(bc_a(ins)+1);
        s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
        for (; s < top; s++) USE_SLOT(s);
        for (; s < maxslot; s++) DEF_SLOT(s);
        if (op == BC_CALLT || op == BC_CALLMT) {
          for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
          return 0;
        }
      } else if (op == BC_VARG) {
        return maxslot;  /* NYI: punt. */
      } else if (op == BC_KNIL) {
        for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s);
      } else if (op == BC_TSETM) {
        for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s);
      }
      break;
    default: break;
    }
    lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
               "use/def analysis PC out of range");
  }

#undef USE_SLOT
#undef DEF_SLOT

  return 0;  /* unreachable */
}

/* Purge dead slots before the next snapshot. */
void lj_snap_purge(jit_State *J)
{
  uint8_t udf[SNAP_USEDEF_SLOTS];
  BCReg s, maxslot = J->maxslot;
  if (bc_op(*J->pc) == BC_FUNCV && maxslot > J->pt->numparams)
    maxslot = J->pt->numparams;
  s = snap_usedef(J, udf, J->pc, maxslot);
  for (; s < maxslot; s++)
    if (udf[s] != 0)
      J->base[s] = 0;  /* Purge dead slots. */
}

/* Shrink last snapshot. */
void lj_snap_shrink(jit_State *J)
{
  SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
  SnapEntry *map = &J->cur.snapmap[snap->mapofs];
  MSize n, m, nlim, nent = snap->nent;
  uint8_t udf[SNAP_USEDEF_SLOTS];
  BCReg maxslot = J->maxslot;
  BCReg baseslot = J->baseslot;
  BCReg minslot = snap_usedef(J, udf, snap_pc(&map[nent]), maxslot);
  maxslot += baseslot;
  minslot += baseslot;
  snap->nslots = (uint8_t)maxslot;
  for (n = m = 0; n < nent; n++) {  /* Remove unused slots from snapshot. */
    BCReg s = snap_slot(map[n]);
    if (s < minslot || (s < maxslot && udf[s-baseslot] == 0))
      map[m++] = map[n];  /* Only copy used slots. */
  }
  snap->nent = (uint8_t)m;
  nlim = J->cur.nsnapmap - snap->mapofs - 1;
  while (n <= nlim) map[m++] = map[n++];  /* Move PC + frame links down. */
  J->cur.nsnapmap = (uint32_t)(snap->mapofs + m);  /* Free up space in map. */
}

/* -- Snapshot access ----------------------------------------------------- */

/* Initialize a Bloom Filter with all renamed refs.
** There are very few renames (often none), so the filter has
** very few bits set. This makes it suitable for negative filtering.
*/
static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim)
{
  BloomFilter rfilt = 0;
  IRIns *ir;
  for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
    if (ir->op2 <= lim)
      bloomset(rfilt, ir->op1);
  return rfilt;
}

/* Process matching renames to find the original RegSP. */
static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
{
  IRIns *ir;
  for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
    if (ir->op1 == ref && ir->op2 <= lim)
      rs = ir->prev;
  return rs;
}

/* Copy RegSP from parent snapshot to the parent links of the IR. */
IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, IRIns *ir)
{
  SnapShot *snap = &T->snap[snapno];
  SnapEntry *map = &T->snapmap[snap->mapofs];
  BloomFilter rfilt = snap_renamefilter(T, snapno);
  MSize n = 0;
  IRRef ref = 0;
  UNUSED(J);
  for ( ; ; ir++) {
    uint32_t rs;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & IRSLOAD_PARENT)) break;
      for ( ; ; n++) {
        lj_assertJ(n < snap->nent, "slot %d not found in snapshot", ir->op1);
        if (snap_slot(map[n]) == ir->op1) {
          ref = snap_ref(map[n++]);
          break;
        }
      }
    } else if (LJ_SOFTFP32 && ir->o == IR_HIOP) {
      ref++;
    } else if (ir->o == IR_PVAL) {
      ref = ir->op1 + REF_BIAS;
    } else {
      break;
    }
    rs = T->ir[ref].prev;
    if (bloomtest(rfilt, ref))
      rs = snap_renameref(T, snapno, ref, rs);
    ir->prev = (uint16_t)rs;
    lj_assertJ(regsp_used(rs), "unused IR %04d in snapshot", ref - REF_BIAS);
  }
  return ir;
}

/* -- Snapshot replay ----------------------------------------------------- */

/* Replay constant from parent trace. */
static TRef snap_replay_const(jit_State *J, IRIns *ir)
{
  /* Only have to deal with constants that can occur in stack slots. */
  switch ((IROp)ir->o) {
  case IR_KPRI: return TREF_PRI(irt_type(ir->t));
  case IR_KINT: return lj_ir_kint(J, ir->i);
  case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t));
  case IR_KNUM: case IR_KINT64:
    return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64);
  case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir));  /* Continuation. */
  default: lj_assertJ(0, "bad IR constant op %d", ir->o); return TREF_NIL;
  }
}

/* De-duplicate parent reference. */
static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref)
{
  MSize j;
  for (j = 0; j < nmax; j++)
    if (snap_ref(map[j]) == ref)
      return J->slot[snap_slot(map[j])] & ~(SNAP_CONT|SNAP_FRAME);
  return 0;
}

/* Emit parent reference with de-duplication. */
static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
                      BloomFilter seen, IRRef ref)
{
  IRIns *ir = &T->ir[ref];
  TRef tr;
  if (irref_isk(ref))
    tr = snap_replay_const(J, ir);
  else if (!regsp_used(ir->prev))
    tr = 0;
  else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0)
    tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0);
  return tr;
}

/* Check whether a sunk store corresponds to an allocation. Slow path. */
static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
{
  if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
      irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
    IRIns *irk = &T->ir[irs->op1];
    if (irk->o == IR_AREF || irk->o == IR_HREFK)
      irk = &T->ir[irk->op1];
    return (&T->ir[irk->op1] == ira);
  }
  return 0;
}

/* Check whether a sunk store corresponds to an allocation. Fast path. */
static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
{
  if (irs->s != 255)
    return (ira + irs->s == irs);  /* Fast check. */
  return snap_sunk_store2(T, ira, irs);
}

/* Replay snapshot state to setup side trace. */
void lj_snap_replay(jit_State *J, GCtrace *T)
{
  SnapShot *snap = &T->snap[J->exitno];
  SnapEntry *map = &T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  BloomFilter seen = 0;
  int pass23 = 0;
  J->framedepth = 0;
  /* Emit IR for slots inherited from parent snapshot. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    IRRef ref = snap_ref(sn);
    IRIns *ir = &T->ir[ref];
    TRef tr;
    /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
    if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0)
      goto setslot;
    bloomset(seen, ref);
    if (irref_isk(ref)) {
      /* See special treatment of LJ_FR2 slot 1 in snapshot_slots() above. */
      if (LJ_FR2 && (sn == SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)))
        tr = 0;
      else
        tr = snap_replay_const(J, ir);
    } else if (!regsp_used(ir->prev)) {
      pass23 = 1;
      lj_assertJ(s != 0, "unused slot 0 in snapshot");
      tr = s;
    } else {
      IRType t = irt_type(ir->t);
      uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
      if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
      if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
      tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
    }
  setslot:
    J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME));  /* Same as TREF_* flags. */
    J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && (s != LJ_FR2));
    if ((sn & SNAP_FRAME))
      J->baseslot = s+1;
  }
  if (pass23) {
    IRIns *irlast = &T->ir[snap->ref];
    pass23 = 0;
    /* Emit dependent PVALs. */
    for (n = 0; n < nent; n++) {
      SnapEntry sn = map[n];
      IRRef refp = snap_ref(sn);
      IRIns *ir = &T->ir[refp];
      if (regsp_reg(ir->r) == RID_SUNK) {
        if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;
        pass23 = 1;
        lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
                   ir->o == IR_CNEW || ir->o == IR_CNEWI,
                   "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
        if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
        if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
        if (LJ_HASFFI && ir->o == IR_CNEWI) {
          if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP)
            snap_pref(J, T, map, nent, seen, (ir+1)->op2);
        } else {
          IRIns *irs;
          for (irs = ir+1; irs < irlast; irs++)
            if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
              if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
                snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
              else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
                       irs+1 < irlast && (irs+1)->o == IR_HIOP)
                snap_pref(J, T, map, nent, seen, (irs+1)->op2);
            }
        }
      } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
        lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                   "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
        J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
      }
    }
    /* Replay sunk instructions. */
    for (n = 0; pass23 && n < nent; n++) {
      SnapEntry sn = map[n];
      IRRef refp = snap_ref(sn);
      IRIns *ir = &T->ir[refp];
      if (regsp_reg(ir->r) == RID_SUNK) {
        TRef op1, op2;
        if (J->slot[snap_slot(sn)] != snap_slot(sn)) {  /* De-dup allocs. */
          J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]];  /* Newly uncovered in this run. */
          continue;  /* Newly uncovered in this run. */
        }
        op1 = ir->op1;
        if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1);
        op2 = ir->op2;
        if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2);
        if (LJ_HASFFI && ir->o == IR_CNEWI) {
          if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) {
            lj_needsplit(J);  /* Emit joining HIOP. */
            op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
                             snap_pref(J, T, map, nent, seen, (ir+1)->op2));
          }
          J->slot[snap_slot(sn)] = emitir(ir->ot & ~(IRT_MARK|IRT_ISPHI), op1, op2);
        } else {
          IRIns *irs;
          TRef tr = emitir(ir->ot, op1, op2);
          J->slot[snap_slot(sn)] = tr;
          for (irs = ir+1; irs < irlast; irs++)
            if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
              IRIns *irr = &T->ir[irs->op1];
              TRef val, key = irr->op2, tmp = tr;
              if (irr->o != IR_FREF) {
                IRIns *irk = &T->ir[key];
                if (irr->o == IR_HREFK)
                  key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]),
                                    irk->op2);
                else
                  key = snap_replay_const(J, irk);
                if (irr->o == IR_HREFK || irr->o == IR_AREF) {
                  IRIns *irf = &T->ir[irr->op1];
                  tmp = emitir(irf->ot, tmp, irf->op2);
                }
              }
              tmp = emitir(irr->ot, tmp, key);
              val = snap_pref(J, T, map, nent, seen, irs->op2);
              if (val == 0) {
                IRIns *irc = &T->ir[irs->op2];
                lj_assertJ(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT,
                           "sunk store for parent IR %04d with bad op %d",
                           refp - REF_BIAS, irc->o);
                val = snap_pref(J, T, map, nent, seen, irc->op1);
                val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
              } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
                         irs+1 < irlast && (irs+1)->o == IR_HIOP) {
                IRType t = IRT_I64;
                if (LJ_SOFTFP32 && irt_type((irs+1)->t) == IRT_SOFTFP)
                  t = IRT_NUM;
                lj_needsplit(J);
                if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
                  uint64_t k = (uint32_t)T->ir[irs->op2].i +
                               ((uint64_t)T->ir[(irs+1)->op2].i << 32);
                  val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM, k);
                } else {
                  val = emitir_raw(IRT(IR_HIOP, t), val,
                          snap_pref(J, T, map, nent, seen, (irs+1)->op2));
                }
                tmp = emitir(IRT(irs->o, t), tmp, val);
                continue;
              }
              tmp = emitir(irs->ot, tmp, val);
            } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) {
              emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
            }
        }
      }
    }
  }
  J->base = J->slot + J->baseslot;
  J->maxslot = snap->nslots - J->baseslot;
  lj_snap_add(J);
  if (pass23)  /* Need explicit GC step _after_ initial snapshot. */
    emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0);
}

/* -- Snapshot restore ---------------------------------------------------- */

static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
                        SnapNo snapno, BloomFilter rfilt,
                        IRIns *ir, TValue *o);

/* Restore a value from the trace exit state. */
static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
                            SnapNo snapno, BloomFilter rfilt,
                            IRRef ref, TValue *o)
{
  IRIns *ir = &T->ir[ref];
  IRType1 t = ir->t;
  RegSP rs = ir->prev;
  if (irref_isk(ref)) {  /* Restore constant slot. */
    if (ir->o == IR_KPTR) {
      o->u64 = (uint64_t)(uintptr_t)ir_kptr(ir);
    } else {
      lj_assertJ(!(ir->o == IR_KKPTR || ir->o == IR_KNULL),
                 "restore of const from IR %04d with bad op %d",
                 ref - REF_BIAS, ir->o);
      lj_ir_kvalue(J->L, o, ir);
    }
    return;
  }
  if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
    rs = snap_renameref(T, snapno, ref, rs);
  if (ra_hasspill(regsp_spill(rs))) {  /* Restore from spill slot. */
    int32_t *sps = &ex->spill[regsp_spill(rs)];
    if (irt_isinteger(t)) {
      setintV(o, *sps);
#if !LJ_SOFTFP32
    } else if (irt_isnum(t)) {
      o->u64 = *(uint64_t *)sps;
#endif
#if LJ_64 && !LJ_GC64
    } else if (irt_islightud(t)) {
      /* 64 bit lightuserdata which may escape already has the tag bits. */
      o->u64 = *(uint64_t *)sps;
#endif
    } else {
      lj_assertJ(!irt_ispri(t), "PRI ref with spill slot");
      setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t));
    }
  } else {  /* Restore from register. */
    Reg r = regsp_reg(rs);
    if (ra_noreg(r)) {
      lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                 "restore from IR %04d has no reg", ref - REF_BIAS);
      snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o);
      if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o));
      return;
    } else if (irt_isinteger(t)) {
      setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
#if !LJ_SOFTFP
    } else if (irt_isnum(t)) {
      setnumV(o, ex->fpr[r-RID_MIN_FPR]);
#elif LJ_64  /* && LJ_SOFTFP */
    } else if (irt_isnum(t)) {
      o->u64 = ex->gpr[r-RID_MIN_GPR];
#endif
#if LJ_64 && !LJ_GC64
    } else if (irt_is64(t)) {
      /* 64 bit values that already have the tag bits. */
      o->u64 = ex->gpr[r-RID_MIN_GPR];
#endif
    } else if (irt_ispri(t)) {
      setpriV(o, irt_toitype(t));
    } else {
      setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t));
    }
  }
}

#if LJ_HASFFI
/* Restore raw data from the trace exit state. */
static void snap_restoredata(jit_State *J, GCtrace *T, ExitState *ex,
                             SnapNo snapno, BloomFilter rfilt,
                             IRRef ref, void *dst, CTSize sz)
{
  IRIns *ir = &T->ir[ref];
  RegSP rs = ir->prev;
  int32_t *src;
  uint64_t tmp;
  UNUSED(J);
  if (irref_isk(ref)) {
    if (ir_isk64(ir)) {
      src = (int32_t *)&ir[1];
    } else if (sz == 8) {
      tmp = (uint64_t)(uint32_t)ir->i;
      src = (int32_t *)&tmp;
    } else {
      src = &ir->i;
    }
  } else {
    if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
      rs = snap_renameref(T, snapno, ref, rs);
    if (ra_hasspill(regsp_spill(rs))) {
      src = &ex->spill[regsp_spill(rs)];
      if (sz == 8 && !irt_is64(ir->t)) {
        tmp = (uint64_t)(uint32_t)*src;
        src = (int32_t *)&tmp;
      }
    } else {
      Reg r = regsp_reg(rs);
      if (ra_noreg(r)) {
        /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */
        lj_assertJ(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                   "restore from IR %04d has no reg", ref - REF_BIAS);
        snap_restoredata(J, T, ex, snapno, rfilt, ir->op1, dst, 4);
        *(lua_Number *)dst = (lua_Number)*(int32_t *)dst;
        return;
      }
      src = (int32_t *)&ex->gpr[r-RID_MIN_GPR];
#if !LJ_SOFTFP
      if (r >= RID_MAX_GPR) {
        src = (int32_t *)&ex->fpr[r-RID_MIN_FPR];
#if LJ_TARGET_PPC
        if (sz == 4) {  /* PPC FPRs are always doubles. */
          *(float *)dst = (float)*(double *)src;
          return;
        }
#else
        if (LJ_BE && sz == 4) src++;
#endif
      } else
#endif
      if (LJ_64 && LJ_BE && sz == 4) src++;
    }
  }
  lj_assertJ(sz == 1 || sz == 2 || sz == 4 || sz == 8,
             "restore from IR %04d with bad size %d", ref - REF_BIAS, sz);
  if (sz == 4) *(int32_t *)dst = *src;
  else if (sz == 8) *(int64_t *)dst = *(int64_t *)src;
  else if (sz == 1) *(int8_t *)dst = (int8_t)*src;
  else *(int16_t *)dst = (int16_t)*src;
}
#endif

/* Unsink allocation from the trace exit state. Unsink sunk stores. */
static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
                        SnapNo snapno, BloomFilter rfilt,
                        IRIns *ir, TValue *o)
{
  lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
             ir->o == IR_CNEW || ir->o == IR_CNEWI,
             "sunk allocation with bad op %d", ir->o);
#if LJ_HASFFI
  if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
    CTState *cts = ctype_cts(J->L);
    CTypeID id = (CTypeID)T->ir[ir->op1].i;
    CTSize sz;
    CTInfo info = lj_ctype_info(cts, id, &sz);
    GCcdata *cd = lj_cdata_newx(cts, id, sz, info);
    setcdataV(J->L, o, cd);
    if (ir->o == IR_CNEWI) {
      uint8_t *p = (uint8_t *)cdataptr(cd);
      lj_assertJ(sz == 4 || sz == 8, "sunk cdata with bad size %d", sz);
      if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) {
        snap_restoredata(J, T, ex, snapno, rfilt, (ir+1)->op2,
                         LJ_LE ? p+4 : p, 4);
        if (LJ_BE) p += 4;
        sz = 4;
      }
      snap_restoredata(J, T, ex, snapno, rfilt, ir->op2, p, sz);
    } else {
      IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
      for (irs = ir+1; irs < irlast; irs++)
        if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
          IRIns *iro = &T->ir[T->ir[irs->op1].op2];
          uint8_t *p = (uint8_t *)cd;
          CTSize szs;
          lj_assertJ(irs->o == IR_XSTORE, "sunk store with bad op %d", irs->o);
          lj_assertJ(T->ir[irs->op1].o == IR_ADD,
                     "sunk store with bad add op %d", T->ir[irs->op1].o);
          lj_assertJ(iro->o == IR_KINT || iro->o == IR_KINT64,
                     "sunk store with bad const offset op %d", iro->o);
          if (irt_is64(irs->t)) szs = 8;
          else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1;
          else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2;
          else szs = 4;
          if (LJ_64 && iro->o == IR_KINT64)
            p += (int64_t)ir_k64(iro)->u64;
          else
            p += iro->i;
          lj_assertJ(p >= (uint8_t *)cdataptr(cd) &&
                     p + szs <= (uint8_t *)cdataptr(cd) + sz,
                     "sunk store with offset out of range");
          if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
            lj_assertJ(szs == 4, "sunk store with bad size %d", szs);
            snap_restoredata(J, T, ex, snapno, rfilt, (irs+1)->op2,
                             LJ_LE ? p+4 : p, 4);
            if (LJ_BE) p += 4;
          }
          snap_restoredata(J, T, ex, snapno, rfilt, irs->op2, p, szs);
        }
    }
  } else
#endif
  {
    IRIns *irs, *irlast;
    GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) :
                                  lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1]));
    settabV(J->L, o, t);
    irlast = &T->ir[T->snap[snapno].ref];
    for (irs = ir+1; irs < irlast; irs++)
      if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
        IRIns *irk = &T->ir[irs->op1];
        TValue tmp, *val;
        lj_assertJ(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
                   irs->o == IR_FSTORE,
                   "sunk store with bad op %d", irs->o);
        if (irk->o == IR_FREF) {
          switch (irk->op2) {
          case IRFL_TAB_META:
            snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp);
            /* NOBARRIER: The table is new (marked white). */
            setgcref(t->metatable, obj2gco(tabV(&tmp)));
            break;
          case IRFL_TAB_NOMM:
            /* Negative metamethod cache invalidated by lj_tab_set() below. */
            break;
          default:
            lj_assertJ(0, "sunk store with bad field %d", irk->op2);
            break;
          }
        } else {
          irk = &T->ir[irk->op2];
          if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1];
          lj_ir_kvalue(J->L, &tmp, irk);
          val = lj_tab_set(J->L, t, &tmp);
          /* NOBARRIER: The table is new (marked white). */
          snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val);
          if (LJ_SOFTFP32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
            snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp);
            val->u32.hi = tmp.u32.lo;
          }
        }
      }
  }
}

/* Restore interpreter state from exit state with the help of a snapshot. */
const BCIns *lj_snap_restore(jit_State *J, void *exptr)
{
  ExitState *ex = (ExitState *)exptr;
  SnapNo snapno = J->exitno;  /* For now, snapno == exitno. */
  GCtrace *T = traceref(J, J->parent);
  SnapShot *snap = &T->snap[snapno];
  MSize n, nent = snap->nent;
  SnapEntry *map = &T->snapmap[snap->mapofs];
#if !LJ_FR2 || defined(LUA_USE_ASSERT)
  SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1-LJ_FR2];
#endif
#if !LJ_FR2
  ptrdiff_t ftsz0;
#endif
  TValue *frame;
  BloomFilter rfilt = snap_renamefilter(T, snapno);
  const BCIns *pc = snap_pc(&map[nent]);
  lua_State *L = J->L;

  /* Set interpreter PC to the next PC to get correct error messages. */
  setcframe_pc(cframe_raw(L->cframe), pc+1);

  /* Make sure the stack is big enough for the slots from the snapshot. */
  if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) {
    L->top = curr_topL(L);
    lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize);
  }

  /* Fill stack slots with data from the registers and spill slots. */
  frame = L->base-1-LJ_FR2;
#if !LJ_FR2
  ftsz0 = frame_ftsz(frame);  /* Preserve link to previous frame in slot #0. */
#endif
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    if (!(sn & SNAP_NORESTORE)) {
      TValue *o = &frame[snap_slot(sn)];
      IRRef ref = snap_ref(sn);
      IRIns *ir = &T->ir[ref];
      if (ir->r == RID_SUNK) {
        MSize j;
        for (j = 0; j < n; j++)
          if (snap_ref(map[j]) == ref) {  /* De-duplicate sunk allocations. */
            copyTV(L, o, &frame[snap_slot(map[j])]);
            goto dupslot;
          }
        snap_unsink(J, T, ex, snapno, rfilt, ir, o);
      dupslot:
        continue;
      }
      snap_restoreval(J, T, ex, snapno, rfilt, ref, o);
      if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM) && tvisint(o)) {
        TValue tmp;
        snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp);
        o->u32.hi = tmp.u32.lo;
#if !LJ_FR2
      } else if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        /* Overwrite tag with frame link. */
        setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0);
        L->base = o+1;
#endif
      }
    }
  }
#if LJ_FR2
  L->base += (map[nent+LJ_BE] & 0xff);
#endif
  lj_assertJ(map + nent == flinks, "inconsistent frames in snapshot");

  /* Compute current stack top. */
  switch (bc_op(*pc)) {
  default:
    if (bc_op(*pc) < BC_FUNCF) {
      L->top = curr_topL(L);
      break;
    }
    /* fallthrough */
  case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
    L->top = frame + snap->nslots;
    break;
  }
  J->nsnaprestore++;
  return pc;
}

#undef emitir_raw
#undef emitir

#endif