
tarantool / luajit / 7276372139

20 Dec 2023 01:54PM UTC coverage: 88.873% (+0.5%) from 88.398%

Build 7276372139 (push, via GitHub), committed by igormunkin:
FFI: Fix dangling reference to CType in carith_checkarg().

Reported by Sergey Kaplun.

(cherry-picked from commit db944b2b5)

During an arithmetic operation with a cdata function object and some
cdata value in `carith_checkarg()`, reallocation of `cts->tab` in
`lj_ctype_intern()` may occur. In that case, the reference to the first
`CType` object (`ca->ct[0]`) becomes invalid. This patch saves the
`CTypeID` of this object and gets its `CType` again after possible
reallocation.

Sergey Kaplun:
* added the description and the test for the problem

Part of tarantool/tarantool#9145
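
The pattern behind the fix is the standard remedy for holding a pointer into a growable array: convert the pointer to a stable index before any call that may reallocate the array, then rebuild the pointer afterwards. A minimal sketch using the lj_ctype API names from the message (info and size are placeholders here, not the actual arguments of the patch):

/* Sketch of the fix pattern; see carith_checkarg() for the real code. */
CTypeID id0 = ctype_typeid(cts, ca->ct[0]);  /* Pointer -> stable CTypeID. */
lj_ctype_intern(cts, info, size);            /* May reallocate cts->tab. */
ca->ct[0] = ctype_get(cts, id0);             /* Rebuild pointer from CTypeID. */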

5400 of 5995 branches covered (90.08%)

Branch coverage included in aggregate %.

3 of 3 new or added lines in 1 file covered. (100.0%)

240 existing lines in 15 files now uncovered.

20687 of 23358 relevant lines covered (88.56%)

2751138.68 hits per line

Source File

/src/lj_snap.c: 97.11% covered
Per-line hit counts are shown in the left column; × marks an uncovered line, UNCOV a line newly uncovered by this build.
           /*
           ** Snapshot handling.
           ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
           */

           #define lj_snap_c
           #define LUA_CORE

           #include "lj_obj.h"

           #if LJ_HASJIT

           #include "lj_gc.h"
           #include "lj_tab.h"
           #include "lj_state.h"
           #include "lj_frame.h"
           #include "lj_bc.h"
           #include "lj_ir.h"
           #include "lj_jit.h"
           #include "lj_iropt.h"
           #include "lj_trace.h"
           #include "lj_snap.h"
           #include "lj_target.h"
           #if LJ_HASFFI
           #include "lj_ctype.h"
           #include "lj_cdata.h"
           #endif

           /* Pass IR on to next optimization in chain (FOLD). */
           #define emitir(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

           /* Emit raw IR without passing through optimizations. */
           #define emitir_raw(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))

           /* -- Snapshot buffer allocation ------------------------------------------ */

           /* Grow snapshot buffer. */
      143✔ void lj_snap_grow_buf_(jit_State *J, MSize need)
           {
      143✔   MSize maxsnap = (MSize)J->param[JIT_P_maxsnap];
      143✔   if (need > maxsnap)
         ×     lj_trace_err(J, LJ_TRERR_SNAPOV);
      143✔   lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot);
      143✔   J->cur.snap = J->snapbuf;
      143✔ }

           /* Grow snapshot map buffer. */
      144✔ void lj_snap_grow_map_(jit_State *J, MSize need)
           {
      144✔   if (need < 2*J->sizesnapmap)
               need = 2*J->sizesnapmap;
       96✔   else if (need < 64)
               need = 64;
      288✔   J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf,
      144✔                     J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry));
      144✔   J->cur.snapmap = J->snapmapbuf;
      144✔   J->sizesnapmap = need;
      144✔ }

           /* -- Snapshot generation ------------------------------------------------- */

           /* Add all modified slots to the snapshot. */
    94,816✔ static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
           {
    94,816✔   IRRef retf = J->chain[IR_RETF];  /* Limits SLOAD restore elimination. */
    94,816✔   BCReg s;
    94,816✔   MSize n = 0;
 1,042,474✔   for (s = 0; s < nslots; s++) {
   947,658✔     TRef tr = J->slot[s];
   947,658✔     IRRef ref = tref_ref(tr);
           #if LJ_FR2
   947,658✔     if (s == 1) {  /* Ignore slot 1 in LJ_FR2 mode, except if tailcalled. */
    94,816✔       if ((tr & TREF_FRAME))
    17,869✔         map[n++] = SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL);
    94,816✔       continue;
                }
   852,842✔     if ((tr & (TREF_FRAME | TREF_CONT)) && !ref) {
    10,330✔       cTValue *base = J->L->base - J->baseslot;
    10,330✔       tr = J->slot[s] = (tr & 0xff0000) | lj_ir_k64(J, IR_KNUM, base[s].u64);
    10,330✔       ref = tref_ref(tr);
                }
           #endif
   852,842✔     if (ref) {
   477,620✔       SnapEntry sn = SNAP_TR(s, tr);
   477,620✔       IRIns *ir = &J->cur.ir[ref];
   477,620✔       if ((LJ_FR2 || !(sn & (SNAP_CONT|SNAP_FRAME))) &&
   477,620✔           ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
                    /* No need to snapshot unmodified non-inherited slots. */
   253,824✔         if (!(ir->op2 & IRSLOAD_INHERIT))
   199,573✔           continue;
                    /* No need to restore readonly slots and unmodified non-parent slots. */
    54,251✔         if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) &&
                        (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
    41,535✔           sn |= SNAP_NORESTORE;
                  }
   278,047✔       if (LJ_SOFTFP32 && irt_isnum(ir->t))
                    sn |= SNAP_SOFTFPNUM;
   278,047✔       map[n++] = sn;
                }
              }
    94,816✔   return n;
           }

           /* Add frame links at the end of the snapshot. */
    94,816✔ static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot)
           {
    94,816✔   cTValue *frame = J->L->base - 1;
    94,816✔   cTValue *lim = J->L->base - J->baseslot + LJ_FR2;
    94,816✔   GCfunc *fn = frame_func(frame);
    94,816✔   cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top;
           #if LJ_FR2
    94,816✔   uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2);
    94,816✔   lj_assertJ(2 <= J->baseslot && J->baseslot <= 257, "bad baseslot");
    94,816✔   memcpy(map, &pcbase, sizeof(uint64_t));
           #else
             MSize f = 0;
             map[f++] = SNAP_MKPC(J->pc);  /* The current PC is always the first entry. */
             lj_assertJ(!J->pt ||
                        (J->pc >= proto_bc(J->pt) &&
                         J->pc < proto_bc(J->pt) + J->pt->sizebc), "bad snapshot PC");
           #endif
   129,979✔   while (frame > lim) {  /* Backwards traversal of all frames above base. */
    35,163✔     if (frame_islua(frame)) {
           #if !LJ_FR2
                 map[f++] = SNAP_MKPC(frame_pc(frame));
           #endif
    34,212✔       frame = frame_prevl(frame);
       951✔     } else if (frame_iscont(frame)) {
           #if !LJ_FR2
                 map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
                 map[f++] = SNAP_MKPC(frame_contpc(frame));
           #endif
       305✔       frame = frame_prevd(frame);
                } else {
       646✔       lj_assertJ(!frame_isc(frame), "broken frame chain");
           #if !LJ_FR2
                 map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
           #endif
       646✔       frame = frame_prevd(frame);
       646✔       continue;
                }
    34,517✔     if (frame + funcproto(frame_func(frame))->framesize > ftop)
                  ftop = frame + funcproto(frame_func(frame))->framesize;
              }
    94,816✔   *topslot = (uint8_t)(ftop - lim);
           #if LJ_FR2
    94,816✔   lj_assertJ(sizeof(SnapEntry) * 2 == sizeof(uint64_t), "bad SnapEntry def");
    94,816✔   return 2;
           #else
             lj_assertJ(f == (MSize)(1 + J->framedepth), "miscalculated snapshot size");
             return f;
           #endif
           }

           /* Take a snapshot of the current stack. */
    94,816✔ static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
           {
    94,816✔   BCReg nslots = J->baseslot + J->maxslot;
    94,816✔   MSize nent;
    94,816✔   SnapEntry *p;
             /* Conservative estimate. */
    94,816✔   lj_snap_grow_map(J, nsnapmap + nslots + (MSize)(LJ_FR2?2:J->framedepth+1));
    94,816✔   p = &J->cur.snapmap[nsnapmap];
    94,816✔   nent = snapshot_slots(J, p, nslots);
    94,816✔   snap->nent = (uint8_t)nent;
    94,816✔   nent += snapshot_framelinks(J, p + nent, &snap->topslot);
    94,816✔   snap->mapofs = (uint32_t)nsnapmap;
    94,816✔   snap->ref = (IRRef1)J->cur.nins;
    94,816✔   snap->mcofs = 0;
    94,816✔   snap->nslots = (uint8_t)nslots;
    94,816✔   snap->count = 0;
    94,816✔   J->cur.nsnapmap = (uint32_t)(nsnapmap + nent);
    94,816✔ }

           /* Add or merge a snapshot. */
    94,816✔ void lj_snap_add(jit_State *J)
           {
    94,816✔   MSize nsnap = J->cur.nsnap;
    94,816✔   MSize nsnapmap = J->cur.nsnapmap;
             /* Merge if no ins. inbetween or if requested and no guard inbetween. */
    94,816✔   if ((nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins) ||
    80,877✔       (J->mergesnap && !irt_isguard(J->guardemit))) {
    17,914✔     if (nsnap == 1) {  /* But preserve snap #0 PC. */
       614✔       emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
       614✔       goto nomerge;
                }
    17,300✔     nsnapmap = J->cur.snap[--nsnap].mapofs;
              } else {
    76,902✔   nomerge:
    77,516✔     lj_snap_grow_buf(J, nsnap+1);
    77,516✔     J->cur.nsnap = (uint16_t)(nsnap+1);
              }
    94,816✔   J->mergesnap = 0;
    94,816✔   J->guardemit.irt = 0;
    94,816✔   snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap);
    94,816✔ }

           /* -- Snapshot modification ----------------------------------------------- */

           #define SNAP_USEDEF_SLOTS        (LJ_MAX_JSLOTS+LJ_STACK_EXTRA)

           /* Find unused slots with reaching-definitions bytecode data-flow analysis. */
           static BCReg snap_usedef(jit_State *J, uint8_t *udf,
                                    const BCIns *pc, BCReg maxslot)
           {
             BCReg s;
             GCobj *o;

             if (maxslot == 0) return 0;
           #ifdef LUAJIT_USE_VALGRIND
             /* Avoid errors for harmless reads beyond maxslot. */
             memset(udf, 1, SNAP_USEDEF_SLOTS);
           #else
             memset(udf, 1, maxslot);
           #endif

             /* Treat open upvalues as used. */
             o = gcref(J->L->openupval);
             while (o) {
               if (uvval(gco2uv(o)) < J->L->base) break;
               udf[uvval(gco2uv(o)) - J->L->base] = 0;
               o = gcref(o->gch.nextgc);
             }

           #define USE_SLOT(s)                udf[(s)] &= ~1
           #define DEF_SLOT(s)                udf[(s)] *= 3

             /* Scan through following bytecode and check for uses/defs. */
             lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
                        "snapshot PC out of range");
             for (;;) {
               BCIns ins = *pc++;
               BCOp op = bc_op(ins);
               switch (bcmode_b(op)) {
               case BCMvar: USE_SLOT(bc_b(ins)); break;
               default: break;
               }
               switch (bcmode_c(op)) {
               case BCMvar: USE_SLOT(bc_c(ins)); break;
               case BCMrbase:
                 lj_assertJ(op == BC_CAT, "unhandled op %d with RC rbase", op);
                 for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
                 for (; s < maxslot; s++) DEF_SLOT(s);
                 break;
               case BCMjump:
               handle_jump: {
                 BCReg minslot = bc_a(ins);
                 if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT;
                 else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1;
                 else if (op == BC_UCLO) { pc += bc_j(ins); break; }
                 for (s = minslot; s < maxslot; s++) DEF_SLOT(s);
                 return minslot < maxslot ? minslot : maxslot;
                 }
               case BCMlit:
                 if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
                   goto handle_jump;
                 } else if (bc_isret(op)) {
                   BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1);
                   for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
                   for (; s < top; s++) USE_SLOT(s);
                   for (; s < maxslot; s++) DEF_SLOT(s);
                   return 0;
                 }
                 break;
               case BCMfunc: return maxslot;  /* NYI: will abort, anyway. */
               default: break;
               }
               switch (bcmode_a(op)) {
               case BCMvar: USE_SLOT(bc_a(ins)); break;
               case BCMdst:
                  if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins));
                  break;
               case BCMbase:
                 if (op >= BC_CALLM && op <= BC_ITERN) {
                   BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
                               maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2);
                   if (LJ_FR2) DEF_SLOT(bc_a(ins)+1);
                   s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
                   for (; s < top; s++) USE_SLOT(s);
                   for (; s < maxslot; s++) DEF_SLOT(s);
                   if (op == BC_CALLT || op == BC_CALLMT) {
                     for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
                     return 0;
                   }
                 } else if (op == BC_VARG) {
                   return maxslot;  /* NYI: punt. */
                 } else if (op == BC_KNIL) {
                   for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s);
                 } else if (op == BC_TSETM) {
                   for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s);
                 }
                 break;
               default: break;
               }
               lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
                          "use/def analysis PC out of range");
             }

           #undef USE_SLOT
           #undef DEF_SLOT

             return 0;  /* unreachable */
           }

           /* Purge dead slots before the next snapshot. */
    66,219✔ void lj_snap_purge(jit_State *J)
           {
    66,219✔   uint8_t udf[SNAP_USEDEF_SLOTS];
    66,219✔   BCReg s, maxslot = J->maxslot;
    66,219✔   if (bc_op(*J->pc) == BC_FUNCV && maxslot > J->pt->numparams)
               maxslot = J->pt->numparams;
    66,219✔   s = snap_usedef(J, udf, J->pc, maxslot);
   251,933✔   for (; s < maxslot; s++)
   119,495✔     if (udf[s] != 0)
   103,805✔       J->base[s] = 0;  /* Purge dead slots. */
    66,219✔ }

           /* Shrink last snapshot. */
    11,235✔ void lj_snap_shrink(jit_State *J)
           {
    11,235✔   SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
    11,235✔   SnapEntry *map = &J->cur.snapmap[snap->mapofs];
    11,235✔   MSize n, m, nlim, nent = snap->nent;
    11,235✔   uint8_t udf[SNAP_USEDEF_SLOTS];
    11,235✔   BCReg maxslot = J->maxslot;
    11,235✔   BCReg baseslot = J->baseslot;
    11,235✔   BCReg minslot = snap_usedef(J, udf, snap_pc(&map[nent]), maxslot);
    11,235✔   maxslot += baseslot;
    11,235✔   minslot += baseslot;
    11,235✔   snap->nslots = (uint8_t)maxslot;
    87,931✔   for (n = m = 0; n < nent; n++) {  /* Remove unused slots from snapshot. */
    76,696✔     BCReg s = snap_slot(map[n]);
    76,696✔     if (s < minslot || (s < maxslot && udf[s-baseslot] == 0))
    65,234✔       map[m++] = map[n];  /* Only copy used slots. */
              }
    11,235✔   snap->nent = (uint8_t)m;
    11,235✔   nlim = J->cur.nsnapmap - snap->mapofs - 1;
    33,705✔   while (n <= nlim) map[m++] = map[n++];  /* Move PC + frame links down. */
    11,235✔   J->cur.nsnapmap = (uint32_t)(snap->mapofs + m);  /* Free up space in map. */
    11,235✔ }

           /* -- Snapshot access ----------------------------------------------------- */

           /* Initialize a Bloom Filter with all renamed refs.
           ** There are very few renames (often none), so the filter has
           ** very few bits set. This makes it suitable for negative filtering.
           */
    34,160✔ static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim)
           {
    34,160✔   BloomFilter rfilt = 0;
    34,160✔   IRIns *ir;
    36,645✔   for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
     2,485✔     if (ir->op2 <= lim)
     1,364✔       bloomset(rfilt, ir->op1);
    30,962✔   return rfilt;
           }

           /* Process matching renames to find the original RegSP. */
       959✔ static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
           {
       959✔   IRIns *ir;
     4,337✔   for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
     3,378✔     if (ir->op1 == ref && ir->op2 <= lim)
     1,027✔       rs = ir->prev;
             return rs;
           }
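
A self-contained sketch of the negative-filtering idea described in the comment above snap_renamefilter() (the real bloomset()/bloomtest() macros live in lj_def.h; this mirrors their shape but is not a verbatim copy):

#include <stdint.h>

typedef uint64_t BloomFilter;
#define BLOOM_MASK      (8*sizeof(BloomFilter) - 1)
#define bloombit(x)     ((BloomFilter)1 << ((x) & BLOOM_MASK))
#define bloomset(b, x)  ((b) |= bloombit((x)))
#define bloomtest(b, x) ((b) & bloombit((x)))

/* A zero result from bloomtest() proves the ref was never renamed, so the
** common case skips the linear scan over IR_RENAME instructions; a nonzero
** result only means "maybe", and callers fall back to snap_renameref()
** to confirm.
*/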

           /* Copy RegSP from parent snapshot to the parent links of the IR. */
     3,198✔ IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, IRIns *ir)
           {
     3,198✔   SnapShot *snap = &T->snap[snapno];
     3,198✔   SnapEntry *map = &T->snapmap[snap->mapofs];
     3,198✔   BloomFilter rfilt = snap_renamefilter(T, snapno);
             MSize n = 0;
     9,264✔   IRRef ref = 0;
     9,264✔   UNUSED(J);
    15,330✔   for ( ; ; ir++) {
     9,264✔     uint32_t rs;
     9,264✔     if (ir->o == IR_SLOAD) {
     6,076✔       if (!(ir->op2 & IRSLOAD_PARENT)) break;
    18,571✔       for ( ; ; n++) {
     7,013✔         lj_assertJ(n < snap->nent, "slot %d not found in snapshot", ir->op1);
    11,558✔         if (snap_slot(map[n]) == ir->op1) {
     4,545✔           ref = snap_ref(map[n++]);
     4,545✔           break;
                  }
                }
     3,188✔     } else if (LJ_SOFTFP32 && ir->o == IR_HIOP) {
                ref++;
     3,188✔     } else if (ir->o == IR_PVAL) {
     1,521✔       ref = ir->op1 + REF_BIAS;
              } else {
                break;
              }
     6,066✔     rs = T->ir[ref].prev;
     6,066✔     if (bloomtest(rfilt, ref))
        55✔       rs = snap_renameref(T, snapno, ref, rs);
     6,066✔     ir->prev = (uint16_t)rs;
     6,066✔     lj_assertJ(regsp_used(rs), "unused IR %04d in snapshot", ref - REF_BIAS);
              }
     3,198✔   return ir;
           }

           /* -- Snapshot replay ----------------------------------------------------- */

           /* Replay constant from parent trace. */
     9,210✔ static TRef snap_replay_const(jit_State *J, IRIns *ir)
           {
             /* Only have to deal with constants that can occur in stack slots. */
     9,210✔   switch ((IROp)ir->o) {
        50✔   case IR_KPRI: return TREF_PRI(irt_type(ir->t));
     3,207✔   case IR_KINT: return lj_ir_kint(J, ir->i);
     3,955✔   case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t));
     1,998✔   case IR_KNUM: case IR_KINT64:
     1,998✔     return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64);
   UNCOV ×   case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir));  /* Continuation. */
             default: lj_assertJ(0, "bad IR constant op %d", ir->o); return TREF_NIL;
             }
           }

           /* De-duplicate parent reference. */
           static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref)
           {
             MSize j;
     8,617✔   for (j = 0; j < nmax; j++)
     8,356✔     if (snap_ref(map[j]) == ref)
       619✔       return J->slot[snap_slot(map[j])] & ~(SNAP_CONT|SNAP_FRAME);
             return 0;
           }

           /* Emit parent reference with de-duplication. */
           static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
                                 BloomFilter seen, IRRef ref)
           {
             IRIns *ir = &T->ir[ref];
             TRef tr;
             if (irref_isk(ref))
               tr = snap_replay_const(J, ir);
             else if (!regsp_used(ir->prev))
               tr = 0;
             else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0)
               tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0);
             return tr;
           }

           /* Check whether a sunk store corresponds to an allocation. Slow path. */
           static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
           {
             if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
                 irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
               IRIns *irk = &T->ir[irs->op1];
               if (irk->o == IR_AREF || irk->o == IR_HREFK)
                 irk = &T->ir[irk->op1];
               return (&T->ir[irk->op1] == ira);
             }
             return 0;
           }

           /* Check whether a sunk store corresponds to an allocation. Fast path. */
        54✔ static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
           {
        54✔   if (irs->s != 255)
        54✔     return (ira + irs->s == irs);  /* Fast check. */
   UNCOV ×   return snap_sunk_store2(T, ira, irs);
           }

           /* Replay snapshot state to setup side trace. */
     3,030✔ void lj_snap_replay(jit_State *J, GCtrace *T)
           {
     3,030✔   SnapShot *snap = &T->snap[J->exitno];
     3,030✔   SnapEntry *map = &T->snapmap[snap->mapofs];
     3,030✔   MSize n, nent = snap->nent;
     3,030✔   BloomFilter seen = 0;
     3,030✔   int pass23 = 0;
     3,030✔   J->framedepth = 0;
             /* Emit IR for slots inherited from parent snapshot. */
    16,281✔   for (n = 0; n < nent; n++) {
    13,251✔     SnapEntry sn = map[n];
    13,251✔     BCReg s = snap_slot(sn);
    13,251✔     IRRef ref = snap_ref(sn);
    13,251✔     IRIns *ir = &T->ir[ref];
    13,251✔     TRef tr;
                /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
    14,131✔     if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0)
       619✔       goto setslot;
    12,632✔     bloomset(seen, ref);
    12,632✔     if (irref_isk(ref)) {
                  /* See special treatment of LJ_FR2 slot 1 in snapshot_slots() above. */
     6,469✔       if (LJ_FR2 && (sn == SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)))
                    tr = 0;
                  else
     6,162✔         tr = snap_replay_const(J, ir);
     6,163✔     } else if (!regsp_used(ir->prev)) {
                  pass23 = 1;
                  lj_assertJ(s != 0, "unused slot 0 in snapshot");
                  tr = s;
                } else {
     4,644✔       IRType t = irt_type(ir->t);
     4,644✔       uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
     4,644✔       if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
     4,644✔       if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
     4,644✔       tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
                }
    13,251✔   setslot:
    13,251✔     J->slot[s] = tr | (sn&(SNAP_CONT|SNAP_FRAME));  /* Same as TREF_* flags. */
    13,251✔     J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && (s != LJ_FR2));
    13,251✔     if ((sn & SNAP_FRAME))
     2,327✔       J->baseslot = s+1;
              }
     3,030✔   if (pass23) {
       128✔     IRIns *irlast = &T->ir[snap->ref];
       128✔     pass23 = 0;
                /* Emit dependent PVALs. */
     2,603✔     for (n = 0; n < nent; n++) {
     2,475✔       SnapEntry sn = map[n];
     2,475✔       IRRef refp = snap_ref(sn);
     2,475✔       IRIns *ir = &T->ir[refp];
     2,475✔       if (regsp_reg(ir->r) == RID_SUNK) {
     1,518✔         if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;
     1,517✔         pass23 = 1;
     1,517✔         lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
                               ir->o == IR_CNEW || ir->o == IR_CNEWI,
                               "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
     1,517✔         if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
     1,517✔         if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
     1,517✔         if (LJ_HASFFI && ir->o == IR_CNEWI) {
                      if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP)
                        snap_pref(J, T, map, nent, seen, (ir+1)->op2);
                    } else {
        11✔           IRIns *irs;
       182✔           for (irs = ir+1; irs < irlast; irs++)
       198✔             if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
        19✔               if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
         6✔                 snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
                          else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
                                   irs+1 < irlast && (irs+1)->o == IR_HIOP)
                            snap_pref(J, T, map, nent, seen, (irs+1)->op2);
                        }
                    }
       957✔       } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
         2✔         lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                               "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
         2✔         J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
                  }
                }
                /* Replay sunk instructions. */
     2,600✔     for (n = 0; pass23 && n < nent; n++) {
     2,472✔       SnapEntry sn = map[n];
     2,472✔       IRRef refp = snap_ref(sn);
     2,472✔       IRIns *ir = &T->ir[refp];
     2,472✔       if (regsp_reg(ir->r) == RID_SUNK) {
     1,518✔         TRef op1, op2;
     1,518✔         if (J->slot[snap_slot(sn)] != snap_slot(sn)) {  /* De-dup allocs. */
         1✔           J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]];
         1✔           continue;
                    }
     1,517✔         op1 = ir->op1;
     1,517✔         if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1);
     1,517✔         op2 = ir->op2;
     1,517✔         if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2);
     1,517✔         if (LJ_HASFFI && ir->o == IR_CNEWI) {
     1,506✔           if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) {
                        lj_needsplit(J);  /* Emit joining HIOP. */
                        op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
                                         snap_pref(J, T, map, nent, seen, (ir+1)->op2));
                      }
     1,506✔           J->slot[snap_slot(sn)] = emitir(ir->ot & ~(IRT_MARK|IRT_ISPHI), op1, op2);
                    } else {
        11✔           IRIns *irs;
        11✔           TRef tr = emitir(ir->ot, op1, op2);
        11✔           J->slot[snap_slot(sn)] = tr;
       182✔           for (irs = ir+1; irs < irlast; irs++)
       217✔             if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
        19✔               IRIns *irr = &T->ir[irs->op1];
        19✔               TRef val, key = irr->op2, tmp = tr;
        19✔               if (irr->o != IR_FREF) {
        18✔                 IRIns *irk = &T->ir[key];
        18✔                 if (irr->o == IR_HREFK)
         2✔                   key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]),
         2✔                                     irk->op2);
                            else
        16✔                   key = snap_replay_const(J, irk);
        18✔                 if (irr->o == IR_HREFK || irr->o == IR_AREF) {
        10✔                   IRIns *irf = &T->ir[irr->op1];
        10✔                   tmp = emitir(irf->ot, tmp, irf->op2);
                            }
                          }
        19✔               tmp = emitir(irr->ot, tmp, key);
        19✔               val = snap_pref(J, T, map, nent, seen, irs->op2);
        19✔               if (val == 0) {
         6✔                 IRIns *irc = &T->ir[irs->op2];
         6✔                 lj_assertJ(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT,
                                       "sunk store for parent IR %04d with bad op %d",
                                       refp - REF_BIAS, irc->o);
         6✔                 val = snap_pref(J, T, map, nent, seen, irc->op1);
         6✔                 val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
                          } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
                                     irs+1 < irlast && (irs+1)->o == IR_HIOP) {
                            IRType t = IRT_I64;
                            if (LJ_SOFTFP32 && irt_type((irs+1)->t) == IRT_SOFTFP)
                              t = IRT_NUM;
                            lj_needsplit(J);
                            if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
                              uint64_t k = (uint32_t)T->ir[irs->op2].i +
                                           ((uint64_t)T->ir[(irs+1)->op2].i << 32);
                              val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM, k);
                            } else {
                              val = emitir_raw(IRT(IR_HIOP, t), val,
                                      snap_pref(J, T, map, nent, seen, (irs+1)->op2));
                            }
                            tmp = emitir(IRT(irs->o, t), tmp, val);
                            continue;
                          }
        19✔               tmp = emitir(irs->ot, tmp, val);
       152✔             } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) {
   UNCOV ×               emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
                        }
                    }
                  }
                }
              }
     3,030✔   J->base = J->slot + J->baseslot;
     3,030✔   J->maxslot = snap->nslots - J->baseslot;
     3,030✔   lj_snap_add(J);
     3,030✔   if (pass23)  /* Need explicit GC step _after_ initial snapshot. */
       127✔     emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0);
     3,030✔ }

           /* -- Snapshot restore ---------------------------------------------------- */

           static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
                                   SnapNo snapno, BloomFilter rfilt,
                                   IRIns *ir, TValue *o);

           /* Restore a value from the trace exit state. */
   131,571✔ static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
                                       SnapNo snapno, BloomFilter rfilt,
                                       IRRef ref, TValue *o)
           {
   131,651✔   IRIns *ir = &T->ir[ref];
   131,651✔   IRType1 t = ir->t;
   131,651✔   RegSP rs = ir->prev;
   131,651✔   if (irref_isk(ref)) {  /* Restore constant slot. */
    79,154✔     if (ir->o == IR_KPTR) {
   UNCOV ×       o->u64 = (uint64_t)(uintptr_t)ir_kptr(ir);
              } else {
    79,154✔       lj_assertJ(!(ir->o == IR_KKPTR || ir->o == IR_KNULL),
                             "restore of const from IR %04d with bad op %d",
                             ref - REF_BIAS, ir->o);
    79,154✔       lj_ir_kvalue(J->L, o, ir);
              }
    79,154✔     return;
              }
    52,497✔   if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
       904✔     rs = snap_renameref(T, snapno, ref, rs);
    52,497✔   if (ra_hasspill(regsp_spill(rs))) {  /* Restore from spill slot. */
     2,524✔     int32_t *sps = &ex->spill[regsp_spill(rs)];
     2,524✔     if (irt_isinteger(t)) {
       367✔       setintV(o, *sps);
           #if !LJ_SOFTFP32
     2,157✔     } else if (irt_isnum(t)) {
     1,247✔       o->u64 = *(uint64_t *)sps;
           #endif
           #if LJ_64 && !LJ_GC64
                } else if (irt_islightud(t)) {
                  /* 64 bit lightuserdata which may escape already has the tag bits. */
                  o->u64 = *(uint64_t *)sps;
           #endif
                } else {
       910✔       lj_assertJ(!irt_ispri(t), "PRI ref with spill slot");
       910✔       setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t));
                }
              } else {  /* Restore from register. */
    49,973✔     Reg r = regsp_reg(rs);
    49,973✔     if (ra_noreg(r)) {
        80✔       lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                             "restore from IR %04d has no reg", ref - REF_BIAS);
        80✔       snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o);
                  if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o));
        80✔       return;
    49,893✔     } else if (irt_isinteger(t)) {
     4,123✔       setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
           #if !LJ_SOFTFP
    45,770✔     } else if (irt_isnum(t)) {
    13,757✔       setnumV(o, ex->fpr[r-RID_MIN_FPR]);
           #elif LJ_64  /* && LJ_SOFTFP */
                } else if (irt_isnum(t)) {
                  o->u64 = ex->gpr[r-RID_MIN_GPR];
           #endif
           #if LJ_64 && !LJ_GC64
                } else if (irt_is64(t)) {
                  /* 64 bit values that already have the tag bits. */
                  o->u64 = ex->gpr[r-RID_MIN_GPR];
           #endif
    32,013✔     } else if (irt_ispri(t)) {
   UNCOV ×       setpriV(o, irt_toitype(t));
                } else {
    32,013✔       setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t));
                }
              }
           }

           #if LJ_HASFFI
           /* Restore raw data from the trace exit state. */
     2,114✔ static void snap_restoredata(jit_State *J, GCtrace *T, ExitState *ex,
                                        SnapNo snapno, BloomFilter rfilt,
                                        IRRef ref, void *dst, CTSize sz)
           {
     2,114✔   IRIns *ir = &T->ir[ref];
     2,114✔   RegSP rs = ir->prev;
     2,114✔   int32_t *src;
     2,114✔   uint64_t tmp;
     2,114✔   UNUSED(J);
     2,114✔   if (irref_isk(ref)) {
         3✔     if (ir_isk64(ir)) {
         2✔       src = (int32_t *)&ir[1];
         1✔     } else if (sz == 8) {
   UNCOV ×       tmp = (uint64_t)(uint32_t)ir->i;
   UNCOV ×       src = (int32_t *)&tmp;
              } else {
         1✔       src = &ir->i;
              }
            } else {
     2,111✔     if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
   UNCOV ×       rs = snap_renameref(T, snapno, ref, rs);
     2,111✔     if (ra_hasspill(regsp_spill(rs))) {
       818✔       src = &ex->spill[regsp_spill(rs)];
       818✔       if (sz == 8 && !irt_is64(ir->t)) {
   UNCOV ×         tmp = (uint64_t)(uint32_t)*src;
   UNCOV ×         src = (int32_t *)&tmp;
                }
              } else {
     1,293✔       Reg r = regsp_reg(rs);
     1,293✔       if (ra_noreg(r)) {
                  /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */
        13✔         lj_assertJ(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
                               "restore from IR %04d has no reg", ref - REF_BIAS);
        13✔         snap_restoredata(J, T, ex, snapno, rfilt, ir->op1, dst, 4);
        13✔         *(lua_Number *)dst = (lua_Number)*(int32_t *)dst;
        13✔         return;
                }
     1,280✔       src = (int32_t *)&ex->gpr[r-RID_MIN_GPR];
           #if !LJ_SOFTFP
     1,280✔       if (r >= RID_MAX_GPR) {
        16✔         src = (int32_t *)&ex->fpr[r-RID_MIN_FPR];
           #if LJ_TARGET_PPC
                    if (sz == 4) {  /* PPC FPRs are always doubles. */
                      *(float *)dst = (float)*(double *)src;
                      return;
                    }
           #else
        16✔         if (LJ_BE && sz == 4) src++;
           #endif
                  } else
           #endif
                  if (LJ_64 && LJ_BE && sz == 4) src++;
                }
              }
     2,101✔   lj_assertJ(sz == 1 || sz == 2 || sz == 4 || sz == 8,
                         "restore from IR %04d with bad size %d", ref - REF_BIAS, sz);
     2,101✔   if (sz == 4) *(int32_t *)dst = *src;
     2,051✔   else if (sz == 8) *(int64_t *)dst = *(int64_t *)src;
   UNCOV ×   else if (sz == 1) *(int8_t *)dst = (int8_t)*src;
   UNCOV ×   else *(int16_t *)dst = (int16_t)*src;
           }
           #endif

           /* Unsink allocation from the trace exit state. Unsink sunk stores. */
           static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
                                   SnapNo snapno, BloomFilter rfilt,
                                   IRIns *ir, TValue *o)
           {
             lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
                        ir->o == IR_CNEW || ir->o == IR_CNEWI,
                        "sunk allocation with bad op %d", ir->o);
           #if LJ_HASFFI
             if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
               CTState *cts = ctype_cts(J->L);
               CTypeID id = (CTypeID)T->ir[ir->op1].i;
               CTSize sz;
               CTInfo info = lj_ctype_info(cts, id, &sz);
               GCcdata *cd = lj_cdata_newx(cts, id, sz, info);
               setcdataV(J->L, o, cd);
               if (ir->o == IR_CNEWI) {
                 uint8_t *p = (uint8_t *)cdataptr(cd);
                 lj_assertJ(sz == 4 || sz == 8, "sunk cdata with bad size %d", sz);
                 if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) {
                   snap_restoredata(J, T, ex, snapno, rfilt, (ir+1)->op2,
                                    LJ_LE ? p+4 : p, 4);
                   if (LJ_BE) p += 4;
                   sz = 4;
                 }
                 snap_restoredata(J, T, ex, snapno, rfilt, ir->op2, p, sz);
               } else {
                 IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
                 for (irs = ir+1; irs < irlast; irs++)
                   if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
                     IRIns *iro = &T->ir[T->ir[irs->op1].op2];
                     uint8_t *p = (uint8_t *)cd;
                     CTSize szs;
                     lj_assertJ(irs->o == IR_XSTORE, "sunk store with bad op %d", irs->o);
                     lj_assertJ(T->ir[irs->op1].o == IR_ADD,
                                "sunk store with bad add op %d", T->ir[irs->op1].o);
                     lj_assertJ(iro->o == IR_KINT || iro->o == IR_KINT64,
                                "sunk store with bad const offset op %d", iro->o);
                     if (irt_is64(irs->t)) szs = 8;
                     else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1;
                     else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2;
                     else szs = 4;
                     if (LJ_64 && iro->o == IR_KINT64)
                       p += (int64_t)ir_k64(iro)->u64;
                     else
                       p += iro->i;
                     lj_assertJ(p >= (uint8_t *)cdataptr(cd) &&
                                p + szs <= (uint8_t *)cdataptr(cd) + sz,
                                "sunk store with offset out of range");
                     if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
                       lj_assertJ(szs == 4, "sunk store with bad size %d", szs);
                       snap_restoredata(J, T, ex, snapno, rfilt, (irs+1)->op2,
                                        LJ_LE ? p+4 : p, 4);
                       if (LJ_BE) p += 4;
                     }
                     snap_restoredata(J, T, ex, snapno, rfilt, irs->op2, p, szs);
                   }
               }
             } else
           #endif
             {
               IRIns *irs, *irlast;
               GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) :
                                             lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1]));
               settabV(J->L, o, t);
               irlast = &T->ir[T->snap[snapno].ref];
               for (irs = ir+1; irs < irlast; irs++)
                 if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
                   IRIns *irk = &T->ir[irs->op1];
                   TValue tmp, *val;
                   lj_assertJ(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
                              irs->o == IR_FSTORE,
                              "sunk store with bad op %d", irs->o);
                   if (irk->o == IR_FREF) {
                     switch (irk->op2) {
                     case IRFL_TAB_META:
                       snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp);
                       /* NOBARRIER: The table is new (marked white). */
                       setgcref(t->metatable, obj2gco(tabV(&tmp)));
                       break;
                     case IRFL_TAB_NOMM:
                       /* Negative metamethod cache invalidated by lj_tab_set() below. */
                       break;
                     default:
                       lj_assertJ(0, "sunk store with bad field %d", irk->op2);
                       break;
                     }
                   } else {
                     irk = &T->ir[irk->op2];
                     if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1];
                     lj_ir_kvalue(J->L, &tmp, irk);
                     val = lj_tab_set(J->L, t, &tmp);
                     /* NOBARRIER: The table is new (marked white). */
                     snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val);
                     if (LJ_SOFTFP32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
                       snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp);
                       val->u32.hi = tmp.u32.lo;
                     }
                   }
                 }
             }
           }

           /* Restore interpreter state from exit state with the help of a snapshot. */
    30,962✔ const BCIns *lj_snap_restore(jit_State *J, void *exptr)
           {
    30,962✔   ExitState *ex = (ExitState *)exptr;
    30,962✔   SnapNo snapno = J->exitno;  /* For now, snapno == exitno. */
    30,962✔   GCtrace *T = traceref(J, J->parent);
    30,962✔   SnapShot *snap = &T->snap[snapno];
    30,962✔   MSize n, nent = snap->nent;
    30,962✔   SnapEntry *map = &T->snapmap[snap->mapofs];
           #if !LJ_FR2 || defined(LUA_USE_ASSERT)
             SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1-LJ_FR2];
           #endif
           #if !LJ_FR2
             ptrdiff_t ftsz0;
           #endif
    30,962✔   TValue *frame;
    30,962✔   BloomFilter rfilt = snap_renamefilter(T, snapno);
    30,962✔   const BCIns *pc = snap_pc(&map[nent]);
    30,962✔   lua_State *L = J->L;

             /* Set interpreter PC to the next PC to get correct error messages. */
    30,962✔   setcframe_pc(cframe_raw(L->cframe), pc+1);

             /* Make sure the stack is big enough for the slots from the snapshot. */
    30,962✔   if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) {
        13✔     L->top = curr_topL(L);
        13✔     lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize);
              }

             /* Fill stack slots with data from the registers and spill slots. */
    30,961✔   frame = L->base-1-LJ_FR2;
           #if !LJ_FR2
             ftsz0 = frame_ftsz(frame);  /* Preserve link to previous frame in slot #0. */
           #endif
   169,898✔   for (n = 0; n < nent; n++) {
   138,937✔     SnapEntry sn = map[n];
   138,937✔     if (!(sn & SNAP_NORESTORE)) {
   133,526✔       TValue *o = &frame[snap_slot(sn)];
   133,526✔       IRRef ref = snap_ref(sn);
   133,526✔       IRIns *ir = &T->ir[ref];
   133,526✔       if (ir->r == RID_SUNK) {
                  MSize j;
    31,145✔         for (j = 0; j < n; j++)
    28,987✔           if (snap_ref(map[j]) == ref) {  /* De-duplicate sunk allocations. */
        21✔             copyTV(L, o, &frame[snap_slot(map[j])]);
        21✔             goto dupslot;
                      }
     2,158✔         snap_unsink(J, T, ex, snapno, rfilt, ir, o);
     2,179✔       dupslot:
     2,179✔         continue;
                  }
   131,347✔       snap_restoreval(J, T, ex, snapno, rfilt, ref, o);
   131,347✔       if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM) && tvisint(o)) {
                    TValue tmp;
                    snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp);
                    o->u32.hi = tmp.u32.lo;
           #if !LJ_FR2
                  } else if ((sn & (SNAP_CONT|SNAP_FRAME))) {
                    /* Overwrite tag with frame link. */
                    setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0);
                    L->base = o+1;
           #endif
                  }
                }
              }
           #if LJ_FR2
    30,961✔   L->base += (map[nent+LJ_BE] & 0xff);
           #endif
    30,961✔   lj_assertJ(map + nent == flinks, "inconsistent frames in snapshot");

             /* Compute current stack top. */
    30,961✔   switch (bc_op(*pc)) {
    30,727✔   default:
    30,727✔     if (bc_op(*pc) < BC_FUNCF) {
    30,724✔       L->top = curr_topL(L);
    30,724✔       break;
              }
               /* fallthrough */
             case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
       237✔     L->top = frame + snap->nslots;
       237✔     break;
              }
    30,961✔   J->nsnaprestore++;
    30,961✔   return pc;
           }

           #undef emitir_raw
           #undef emitir

           #endif