• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 7119513137

06 Dec 2023 07:37PM UTC coverage: 88.514% (+0.05%) from 88.469%
7119513137

push

github

igormunkin
Fix snapshot PC when linking to BC_JLOOP that was a BC_RET*.

Reported by Arseny Vakhrushev.
Fix contributed by Peter Cawley.

(cherry-picked from commit 5c46f4773)

As specified in the comment in `lj_record_stop`, all loops must
set `J->pc` to the next instruction. However, the chunk of logic
in `lj_trace_exit` expects it to be set to `BC_JLOOP` itself if
it used to be a `BC_RET`. This wrong pc results in the execution
of random data that goes after `BC_JLOOP` in the case of
restoration from the snapshot.

This patch fixes that behavior by adapting the loop recording
logic to this specific case.

NOTICE: This patch is only a part of the original commit,
and the other part is backported in the previous commit. The
patch was split into two, so the test case becomes easier to
implement since it can now depend on this assertion instead
of memory layout.

Maxim Kokryashkin:
* added the description and the test for the problem

Part of tarantool/tarantool#9145

Reviewed-by: Sergey Kaplun <skaplun@tarantool.org>
Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Signed-off-by: Igor Munkin <imun@tarantool.org>
(cherry picked from commit 2ab0419fa)

5352 of 5969 branches covered (89.66%)

Branch coverage included in aggregate %.

5 of 5 new or added lines in 1 file covered. (100.0%)

26 existing lines in 5 files now uncovered.

20526 of 23267 relevant lines covered (88.22%)

694532.67 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

95.91
/src/lj_opt_mem.c
1
/*
2
** Memory access optimizations.
3
** AA: Alias Analysis using high-level semantic disambiguation.
4
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
5
** DSE: Dead-Store Elimination.
6
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
7
*/
8

9
#define lj_opt_mem_c
10
#define LUA_CORE
11

12
#include "lj_obj.h"
13

14
#if LJ_HASJIT
15

16
#include "lj_tab.h"
17
#include "lj_ir.h"
18
#include "lj_jit.h"
19
#include "lj_iropt.h"
20
#include "lj_ircall.h"
21
#include "lj_dispatch.h"
22

23
/* Some local macros to save typing. Undef'd at the end. */
24
#define IR(ref)                (&J->cur.ir[(ref)])
25
#define fins                (&J->fold.ins)
26
#define fleft                (J->fold.left)
27
#define fright                (J->fold.right)
28

29
/*
30
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
31
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
32
*/
33

34
/* Return values from alias analysis. */
35
typedef enum {
36
  ALIAS_NO,        /* The two refs CANNOT alias (exact). */
37
  ALIAS_MAY,        /* The two refs MAY alias (inexact). */
38
  ALIAS_MUST        /* The two refs MUST alias (exact). */
39
} AliasRet;
40

41
/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
42

43
/* Simplified escape analysis: check for intervening stores. */
44
static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
62✔
45
{
46
  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
62✔
47
  for (ir++; ir < stop; ir++)
62✔
48
    if (ir->op2 == ref &&
×
49
        (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
×
50
         ir->o == IR_USTORE || ir->o == IR_FSTORE))
×
51
      return ALIAS_MAY;  /* Reference was stored and might alias. */
52
  return ALIAS_NO;  /* Reference was not stored. */
53
}
54

55
/* Alias analysis for two different table references. */
56
static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
57
{
58
  IRIns *taba = IR(ta), *tabb = IR(tb);
59
  int newa, newb;
60
  lj_assertJ(ta != tb, "bad usage");
61
  lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
62
  /* Disambiguate new allocations. */
63
  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
64
  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
65
  if (newa && newb)
66
    return ALIAS_NO;  /* Two different allocations never alias. */
67
  if (newb) {  /* At least one allocation? */
68
    IRIns *tmp = taba; taba = tabb; tabb = tmp;
69
  } else if (!newa) {
70
    return ALIAS_MAY;  /* Anything else: we just don't know. */
71
  }
72
  return aa_escape(J, taba, tabb);
73
}
74

75
/* Check whether there's no aliasing table.clear. */
76
static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
77
{
78
  IRRef ref = J->chain[IR_CALLS];
79
  while (ref > lim) {
80
    IRIns *calls = IR(ref);
81
    if (calls->op2 == IRCALL_lj_tab_clear &&
82
        (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
83
      return 0;  /* Conflict. */
84
    ref = calls->prev;
85
  }
86
  return 1;  /* No conflict. Can safely FOLD/CSE. */
87
}
88

89
/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */
90
int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
61,554✔
91
{
92
  IRRef ta = fins->op1;
61,554✔
93
  IRRef ref = J->chain[IR_NEWREF];
61,554✔
94
  while (ref > lim) {
61,736✔
95
    IRIns *newref = IR(ref);
2,303✔
96
    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
2,303✔
97
      return 0;  /* Conflict. */
98
    ref = newref->prev;
182✔
99
  }
100
  return fwd_aa_tab_clear(J, lim, ta);
59,433✔
101
}
102

103
/* Alias analysis for array and hash access using key-based disambiguation. */
104
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
105
{
106
  IRRef ka = refa->op2;
107
  IRRef kb = refb->op2;
108
  IRIns *keya, *keyb;
109
  IRRef ta, tb;
110
  if (refa == refb)
111
    return ALIAS_MUST;  /* Shortcut for same refs. */
112
  keya = IR(ka);
113
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
114
  keyb = IR(kb);
115
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
116
  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
117
  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
118
  if (ka == kb) {
119
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
120
    if (ta == tb)
121
      return ALIAS_MUST;  /* Same key, same table. */
122
    else
123
      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
124
  }
125
  if (irref_isk(ka) && irref_isk(kb))
126
    return ALIAS_NO;  /* Different constant keys. */
127
  if (refa->o == IR_AREF) {
128
    /* Disambiguate array references based on index arithmetic. */
129
    int32_t ofsa = 0, ofsb = 0;
130
    IRRef basea = ka, baseb = kb;
131
    lj_assertJ(refb->o == IR_AREF, "expected AREF");
132
    /* Gather base and offset from t[base] or t[base+-ofs]. */
133
    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
134
      basea = keya->op1;
135
      ofsa = IR(keya->op2)->i;
136
      if (basea == kb && ofsa != 0)
137
        return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
138
    }
139
    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
140
      baseb = keyb->op1;
141
      ofsb = IR(keyb->op2)->i;
142
      if (ka == baseb && ofsb != 0)
143
        return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
144
    }
145
    if (basea == baseb && ofsa != ofsb)
146
      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
147
  } else {
148
    /* Disambiguate hash references based on the type of their keys. */
149
    lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
150
               (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
151
               "bad xREF IR op %d or %d", refa->o, refb->o);
152
    if (!irt_sametype(keya->t, keyb->t))
153
      return ALIAS_NO;  /* Different key types. */
154
  }
155
  if (ta == tb)
156
    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
157
  else
158
    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
159
}
160

161
/* Array and hash load forwarding. */
162
static TRef fwd_ahload(jit_State *J, IRRef xref)
21,781✔
163
{
164
  IRIns *xr = IR(xref);
21,781✔
165
  IRRef lim = xref;  /* Search limit. */
21,781✔
166
  IRRef ref;
21,781✔
167

168
  /* Search for conflicting stores. */
169
  ref = J->chain[fins->o+IRDELTA_L2S];
21,781✔
170
  while (ref > xref) {
25,232✔
171
    IRIns *store = IR(ref);
10,165✔
172
    switch (aa_ahref(J, xr, IR(store->op1))) {
10,165✔
173
    case ALIAS_NO:   break;  /* Continue searching. */
174
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
1,525✔
175
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
5,189✔
176
    }
177
    ref = store->prev;
3,451✔
178
  }
179

180
  /* No conflicting store (yet): const-fold loads from allocations. */
181
  {
182
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
15,067✔
183
    IRRef tab = ir->op1;
15,067✔
184
    ir = IR(tab);
15,067✔
185
    if ((ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) &&
15,167✔
186
        fwd_aa_tab_clear(J, tab, tab)) {
100✔
187
      /* A NEWREF with a number key may end up pointing to the array part.
188
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
189
      ** Or a NEWREF may rehash the table and move unrelated number keys.
190
      ** For now simply consider this a conflict without forwarding anything.
191
      */
192
      if (xr->o == IR_AREF) {
88✔
193
        IRRef ref2 = J->chain[IR_NEWREF];
44✔
194
        while (ref2 > tab) {
44✔
195
          IRIns *newref = IR(ref2);
29✔
196
          if (irt_isnum(IR(newref->op2)->t))
29✔
197
            goto cselim;
29✔
198
          ref2 = newref->prev;
×
199
        }
200
      } else {
201
        IRIns *key = IR(xr->op2);
44✔
202
        if (key->o == IR_KSLOT) key = IR(key->op1);
44✔
203
        if (irt_isnum(key->t) && J->chain[IR_NEWREF] > tab)
44✔
204
          goto cselim;
12✔
205
      }
206
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
207
      ** But the above search for conflicting stores was limited by xref.
208
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
209
      ** is ok, too. A conflict does NOT limit the search for a matching load.
210
      */
211
      while (ref > tab) {
146✔
212
        IRIns *store = IR(ref);
119✔
213
        switch (aa_ahref(J, xr, IR(store->op1))) {
119✔
214
        case ALIAS_NO:   break;  /* Continue searching. */
215
        case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
10✔
216
        case ALIAS_MUST: return store->op2;  /* Store forwarding. */
10✔
217
        }
218
        ref = store->prev;
99✔
219
      }
220
      if (ir->o == IR_TNEW && !irt_isnil(fins->t))
27✔
221
        return 0;  /* Type instability in loop-carried dependency. */
222
      if (irt_ispri(fins->t)) {
26✔
UNCOV
223
        return TREF_PRI(irt_type(fins->t));
×
224
      } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
26✔
225
                 irt_isstr(fins->t)) {
226
        TValue keyv;
26✔
227
        cTValue *tv;
26✔
228
        IRIns *key = IR(xr->op2);
26✔
229
        if (key->o == IR_KSLOT) key = IR(key->op1);
26✔
230
        lj_ir_kvalue(J->L, &keyv, key);
26✔
231
        tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
26✔
232
        if (itype2irt(tv) != irt_type(fins->t))
30✔
233
          return 0;  /* Type instability in loop-carried dependency. */
234
        if (irt_isnum(fins->t))
25✔
235
          return lj_ir_knum_u64(J, tv->u64);
22✔
236
        else if (LJ_DUALNUM && irt_isint(fins->t))
3✔
237
          return lj_ir_kint(J, intV(tv));
238
        else
239
          return lj_ir_kstr(J, strV(tv));
3✔
240
      }
241
      /* Othwerwise: don't intern as a constant. */
242
    }
243
  }
244

245
cselim:
14,979✔
246
  /* Try to find a matching load. Below the conflicting store, if any. */
247
  ref = J->chain[fins->o];
16,555✔
248
  while (ref > lim) {
21,414✔
249
    IRIns *load = IR(ref);
7,774✔
250
    if (load->op1 == xref)
7,774✔
251
      return ref;  /* Load forwarding. */
2,915✔
252
    ref = load->prev;
4,859✔
253
  }
254
  return 0;  /* Conflict or no match. */
255
}
256

257
/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
258
static TRef fwd_aload_reassoc(jit_State *J)
740✔
259
{
260
  IRIns *irx = IR(fins->op1);
740✔
261
  IRIns *key = IR(irx->op2);
740✔
262
  if (key->o == IR_ADD && irref_isk(key->op2)) {
740✔
263
    IRIns *add2 = IR(key->op1);
267✔
264
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
267✔
265
        IR(key->op2)->i == -IR(add2->op2)->i) {
25✔
266
      IRRef ref = J->chain[IR_AREF];
3✔
267
      IRRef lim = add2->op1;
3✔
268
      if (irx->op1 > lim) lim = irx->op1;
3✔
269
      while (ref > lim) {
7✔
270
        IRIns *ir = IR(ref);
7✔
271
        if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
7✔
272
          return fwd_ahload(J, ref);
3✔
273
        ref = ir->prev;
4✔
274
      }
275
    }
276
  }
277
  return 0;
278
}
279

280
/* ALOAD forwarding. */
281
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
2,672✔
282
{
283
  IRRef ref;
2,672✔
284
  if ((ref = fwd_ahload(J, fins->op1)) ||
2,672✔
285
      (ref = fwd_aload_reassoc(J)))
740✔
286
    return ref;
1,935✔
287
  return EMITFOLD;
737✔
288
}
289

290
/* HLOAD forwarding. */
291
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
19,106✔
292
{
293
  IRRef ref = fwd_ahload(J, fins->op1);
19,106✔
294
  if (ref)
19,106✔
295
    return ref;
296
  return EMITFOLD;
12,902✔
297
}
298

299
/* HREFK forwarding. */
300
TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
22,972✔
301
{
302
  IRRef tab = fleft->op1;
22,972✔
303
  IRRef ref = J->chain[IR_NEWREF];
22,972✔
304
  while (ref > tab) {
22,997✔
305
    IRIns *newref = IR(ref);
2,415✔
306
    if (tab == newref->op1) {
2,415✔
307
      if (fright->op1 == newref->op2 && fwd_aa_tab_clear(J, ref, tab))
1,147✔
308
        return ref;  /* Forward from NEWREF. */
309
      else
310
        goto docse;
874✔
311
    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
1,268✔
312
      goto docse;
1,243✔
313
    }
314
    ref = newref->prev;
25✔
315
  }
316
  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
317
  if (IR(tab)->o == IR_TDUP && fwd_aa_tab_clear(J, tab, tab))
20,582✔
318
    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
349✔
319
docse:
20,233✔
320
  return CSEFOLD;
22,699✔
321
}
322

323
/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
324
int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
38✔
325
{
326
  IRRef lim = fins->op1;  /* Search limit. */
38✔
327
  IRRef ref;
38✔
328

329
  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
330
  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
38✔
331
    ref = J->chain[IR_ASTORE];
4✔
332
    while (ref > lim) {
4✔
333
      if (ref < J->chain[IR_NEWREF])
4✔
334
        return 0;  /* Conflict. */
335
      ref = IR(ref)->prev;
×
336
    }
337
  }
338

339
  /* Search for conflicting stores. */
340
  ref = J->chain[IR_HSTORE];
34✔
341
  while (ref > lim) {
63✔
342
    IRIns *store = IR(ref);
44✔
343
    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
44✔
344
      return 0;  /* Conflict. */
345
    ref = store->prev;
29✔
346
  }
347

348
  return 1;  /* No conflict. Can fold to niltv. */
349
}
350

351
/* ASTORE/HSTORE elimination. */
352
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
8,602✔
353
{
354
  IRRef xref = fins->op1;  /* xREF reference. */
8,602✔
355
  IRRef val = fins->op2;  /* Stored value reference. */
8,602✔
356
  IRIns *xr = IR(xref);
8,602✔
357
  IRRef1 *refp = &J->chain[fins->o];
8,602✔
358
  IRRef ref = *refp;
8,602✔
359
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
9,094✔
360
    IRIns *store = IR(ref);
2,660✔
361
    switch (aa_ahref(J, xr, IR(store->op1))) {
2,660✔
362
    case ALIAS_NO:
363
      break;  /* Continue searching. */
364
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
150✔
365
      if (store->op2 != val)  /* Conflict if the value is different. */
150✔
366
        goto doemit;
148✔
367
      break;  /* Otherwise continue searching. */
368
    case ALIAS_MUST:        /* Store to the same location. */
2,020✔
369
      if (store->op2 == val)  /* Same value: drop the new store. */
2,020✔
370
        return DROPFOLD;
371
      /* Different value: try to eliminate the redundant store. */
372
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
1,994✔
373
        IRIns *ir;
1,971✔
374
        /* Check for any intervening guards (includes conflicting loads). */
375
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
3,980✔
376
          if (irt_isguard(ir->t) || ir->o == IR_CALLL)
2,127✔
377
            goto doemit;  /* No elimination possible. */
118✔
378
        /* Remove redundant store from chain and replace with NOP. */
379
        *refp = store->prev;
1,853✔
380
        store->o = IR_NOP;
1,853✔
381
        store->t.irt = IRT_NIL;
1,853✔
382
        store->op1 = store->op2 = 0;
1,853✔
383
        store->prev = 0;
1,853✔
384
        /* Now emit the new store instead. */
385
      }
386
      goto doemit;
1,876✔
387
    }
388
    ref = *(refp = &store->prev);
492✔
389
  }
390
doemit:
6,434✔
391
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
8,576✔
392
}
393

394
/* -- ULOAD forwarding ---------------------------------------------------- */
395

396
/* The current alias analysis for upvalues is very simplistic. It only
397
** disambiguates between the unique upvalues of the same function.
398
** This is good enough for now, since most upvalues are read-only.
399
**
400
** A more precise analysis would be feasible with the help of the parser:
401
** generate a unique key for every upvalue, even across all prototypes.
402
** Lacking a realistic use-case, it's unclear whether this is beneficial.
403
*/
404
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
75✔
405
{
406
  if (refa->o != refb->o)
75✔
407
    return ALIAS_NO;  /* Different UREFx type. */
408
  if (refa->op1 == refb->op1) {  /* Same function. */
71✔
409
    if (refa->op2 == refb->op2)
58✔
410
      return ALIAS_MUST;  /* Same function, same upvalue idx. */
411
    else
412
      return ALIAS_NO;  /* Same function, different upvalue idx. */
29✔
413
  } else {  /* Different functions, check disambiguation hash values. */
414
    if (((refa->op2 ^ refb->op2) & 0xff))
13✔
415
      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
416
    else
417
      return ALIAS_MAY;  /* No conclusion can be drawn for same hash value. */
11✔
418
  }
419
}
420

421
/* ULOAD forwarding. */
422
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
905✔
423
{
424
  IRRef uref = fins->op1;
905✔
425
  IRRef lim = REF_BASE;  /* Search limit. */
905✔
426
  IRIns *xr = IR(uref);
905✔
427
  IRRef ref;
905✔
428

429
  /* Search for conflicting stores. */
430
  ref = J->chain[IR_USTORE];
905✔
431
  while (ref > lim) {
940✔
432
    IRIns *store = IR(ref);
59✔
433
    switch (aa_uref(xr, IR(store->op1))) {
114✔
434
    case ALIAS_NO:   break;  /* Continue searching. */
435
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
9✔
436
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
15✔
437
    }
438
    ref = store->prev;
35✔
439
  }
440

441
cselim:
881✔
442
  /* Try to find a matching load. Below the conflicting store, if any. */
443

444
  ref = J->chain[IR_ULOAD];
890✔
445
  while (ref > lim) {
1,226✔
446
    IRIns *ir = IR(ref);
522✔
447
    if (ir->op1 == uref ||
522✔
448
        (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
343✔
449
      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
186✔
450
    ref = ir->prev;
336✔
451
  }
452
  return lj_ir_emit(J);
704✔
453
}
454

455
/* USTORE elimination. */
456
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
61✔
457
{
458
  IRRef xref = fins->op1;  /* xREF reference. */
61✔
459
  IRRef val = fins->op2;  /* Stored value reference. */
61✔
460
  IRIns *xr = IR(xref);
61✔
461
  IRRef1 *refp = &J->chain[IR_USTORE];
61✔
462
  IRRef ref = *refp;
61✔
463
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
61✔
464
    IRIns *store = IR(ref);
16✔
465
    switch (aa_uref(xr, IR(store->op1))) {
32✔
466
    case ALIAS_NO:
467
      break;  /* Continue searching. */
468
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
2✔
469
      if (store->op2 != val)  /* Conflict if the value is different. */
2✔
470
        goto doemit;
2✔
471
      break;  /* Otherwise continue searching. */
472
    case ALIAS_MUST:        /* Store to the same location. */
14✔
473
      if (store->op2 == val)  /* Same value: drop the new store. */
14✔
474
        return DROPFOLD;
475
      /* Different value: try to eliminate the redundant store. */
476
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
12✔
477
        IRIns *ir;
7✔
478
        /* Check for any intervening guards (includes conflicting loads). */
479
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
13✔
480
          if (irt_isguard(ir->t))
8✔
481
            goto doemit;  /* No elimination possible. */
2✔
482
        /* Remove redundant store from chain and replace with NOP. */
483
        *refp = store->prev;
5✔
484
        store->o = IR_NOP;
5✔
485
        store->t.irt = IRT_NIL;
5✔
486
        store->op1 = store->op2 = 0;
5✔
487
        store->prev = 0;
5✔
488
        if (ref+1 < J->cur.nins &&
5✔
489
            store[1].o == IR_OBAR && store[1].op1 == xref) {
5✔
490
          IRRef1 *bp = &J->chain[IR_OBAR];
×
491
          IRIns *obar;
×
492
          for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
×
493
            bp = &obar->prev;
×
494
          /* Remove OBAR, too. */
495
          *bp = obar->prev;
×
496
          obar->o = IR_NOP;
×
497
          obar->t.irt = IRT_NIL;
×
498
          obar->op1 = obar->op2 = 0;
×
499
          obar->prev = 0;
×
500
        }
501
        /* Now emit the new store instead. */
502
      }
503
      goto doemit;
10✔
504
    }
505
    ref = *(refp = &store->prev);
×
506
  }
507
doemit:
45✔
508
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
59✔
509
}
510

511
/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
512

513
/* Alias analysis for field access.
514
** Field loads are cheap and field stores are rare.
515
** Simple disambiguation based on field types is good enough.
516
*/
517
static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
57✔
518
{
519
  if (refa->op2 != refb->op2)
57✔
520
    return ALIAS_NO;  /* Different fields. */
521
  if (refa->op1 == refb->op1)
52✔
522
    return ALIAS_MUST;  /* Same field, same object. */
523
  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
14✔
524
    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
14✔
525
  else
526
    return ALIAS_MAY;  /* Same field, possibly different object. */
527
}
528

529
/* Only the loads for mutable fields end up here (see FOLD). */
530
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
8,278✔
531
{
532
  IRRef oref = fins->op1;  /* Object reference. */
8,278✔
533
  IRRef fid = fins->op2;  /* Field ID. */
8,278✔
534
  IRRef lim = oref;  /* Search limit. */
8,278✔
535
  IRRef ref;
8,278✔
536

537
  /* Search for conflicting stores. */
538
  ref = J->chain[IR_FSTORE];
8,278✔
539
  while (ref > oref) {
8,287✔
540
    IRIns *store = IR(ref);
37✔
541
    switch (aa_fref(J, fins, IR(store->op1))) {
47✔
542
    case ALIAS_NO:   break;  /* Continue searching. */
543
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
6✔
544
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
22✔
545
    }
546
    ref = store->prev;
9✔
547
  }
548

549
  /* No conflicting store: const-fold field loads from allocations. */
550
  if (fid == IRFL_TAB_META) {
8,250✔
551
    IRIns *ir = IR(oref);
7,778✔
552
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
7,778✔
553
      return lj_ir_knull(J, IRT_TAB);
523✔
554
  }
555

556
cselim:
7,727✔
557
  /* Try to find a matching load. Below the conflicting store, if any. */
558
  return lj_opt_cselim(J, lim);
7,733✔
559
}
560

561
/* FSTORE elimination. */
562
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
73✔
563
{
564
  IRRef fref = fins->op1;  /* FREF reference. */
73✔
565
  IRRef val = fins->op2;  /* Stored value reference. */
73✔
566
  IRIns *xr = IR(fref);
73✔
567
  IRRef1 *refp = &J->chain[IR_FSTORE];
73✔
568
  IRRef ref = *refp;
73✔
569
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
75✔
570
    IRIns *store = IR(ref);
20✔
571
    switch (aa_fref(J, xr, IR(store->op1))) {
24✔
572
    case ALIAS_NO:
573
      break;  /* Continue searching. */
574
    case ALIAS_MAY:
4✔
575
      if (store->op2 != val)  /* Conflict if the value is different. */
4✔
576
        goto doemit;
2✔
577
      break;  /* Otherwise continue searching. */
578
    case ALIAS_MUST:
16✔
579
      if (store->op2 == val)  /* Same value: drop the new store. */
16✔
580
        return DROPFOLD;
581
      /* Different value: try to eliminate the redundant store. */
582
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
7✔
583
        IRIns *ir;
4✔
584
        /* Check for any intervening guards or conflicting loads. */
585
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
4✔
586
          if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
3✔
587
            goto doemit;  /* No elimination possible. */
3✔
588
        /* Remove redundant store from chain and replace with NOP. */
589
        *refp = store->prev;
1✔
590
        store->o = IR_NOP;
1✔
591
        store->t.irt = IRT_NIL;
1✔
592
        store->op1 = store->op2 = 0;
1✔
593
        store->prev = 0;
1✔
594
        /* Now emit the new store instead. */
595
      }
596
      goto doemit;
4✔
597
    }
598
    ref = *(refp = &store->prev);
2✔
599
  }
600
doemit:
55✔
601
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
64✔
602
}
603

604
/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
605

606
/* Find cdata allocation for a reference (if any). */
607
static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
208✔
608
{
609
  while (ir->o == IR_ADD) {
224✔
610
    if (!irref_isk(ir->op1)) {
16✔
611
      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
16✔
612
      if (ir1) return ir1;
16✔
613
    }
614
    if (irref_isk(ir->op2)) return NULL;
16✔
615
    ir = IR(ir->op2);  /* Flatten right-recursion. */
16✔
616
  }
617
  return ir->o == IR_CNEW ? ir : NULL;
208✔
618
}
619

620
/* Alias analysis for two cdata allocations. */
621
static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
96✔
622
{
623
  IRIns *cnewa = aa_findcnew(J, refa);
96✔
624
  IRIns *cnewb = aa_findcnew(J, refb);
96✔
625
  if (cnewa == cnewb)
96✔
626
    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
627
  if (cnewa && cnewb)
64✔
628
    return ALIAS_NO;  /* Two different allocations never alias. */
629
  if (cnewb) { cnewa = cnewb; refb = refa; }
62✔
630
  return aa_escape(J, cnewa, refb);
62✔
631
}
632

633
/* Alias analysis for XLOAD/XSTORE. */
634
static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
635
{
636
  ptrdiff_t ofsa = 0, ofsb = 0;
637
  IRIns *refb = IR(xb->op1);
638
  IRIns *basea = refa, *baseb = refb;
639
  if (refa == refb && irt_sametype(xa->t, xb->t))
640
    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
641
  /* Offset-based disambiguation. */
642
  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
643
    IRIns *irk = IR(refa->op2);
644
    basea = IR(refa->op1);
645
    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
646
                                            (ptrdiff_t)irk->i;
647
  }
648
  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
649
    IRIns *irk = IR(refb->op2);
650
    baseb = IR(refb->op1);
651
    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
652
                                            (ptrdiff_t)irk->i;
653
  }
654
  /* Treat constified pointers like base vs. base+offset. */
655
  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
656
    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
657
    baseb = basea;
658
  }
659
  /* This implements (very) strict aliasing rules.
660
  ** Different types do NOT alias, except for differences in signedness.
661
  ** Type punning through unions is allowed (but forces a reload).
662
  */
663
  if (basea == baseb) {
664
    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
665
    if (ofsa == ofsb) {
666
      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
667
        return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
668
    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
669
      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
670
    }
671
    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
672
    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
673
  }
674
  if (!irt_sametype(xa->t, xb->t) &&
675
      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
676
        ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
677
    return ALIAS_NO;
678
  /* NYI: structural disambiguation. */
679
  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
680
}
681

682
/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
683
static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
202✔
684
{
685
  IRRef ref = J->chain[op];
202✔
686
  IRRef lim = op1;
202✔
687
  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
17✔
688
  while (ref > lim) {
1,191✔
689
    IRIns *ir = IR(ref);
1,128✔
690
    if (ir->op1 == op1 && ir->op2 == op2)
1,128✔
691
      return ref;
692
    ref = ir->prev;
989✔
693
  }
694
  return 0;
695
}
696

697
/* Reassociate index references. */
/* Given the address expression of an XLOAD (ir), try to rebuild an
** equivalent address with the loop-carried increment folded into the
** constant offset, reusing ONLY already-existing instructions (via
** reassoc_trycse) -- nothing new is emitted. Recognized index shapes:
** base+idx, base+(idx<<k) and base+(idx+idx), each with an optional
** outer constant offset.
** Returns the reassociated address ref on success, 0 on failure.
*/
static IRRef reassoc_xref(jit_State *J, IRIns *ir)
{
  ptrdiff_t ofs = 0;
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
    IRIns *irk = IR(ir->op2);
    /* 64-bit offsets live in a KINT64; otherwise use the 32-bit imm. */
    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                           (ptrdiff_t)irk->i;
    ir = IR(ir->op1);  /* Descend to the base+index add. */
  }
  if (ir->o == IR_ADD) {  /* Add of base + index. */
    /* Index ref > base ref for loop-carried dependences. Only check op1. */
    IRIns *ir2, *ir1 = IR(ir->op1);
    int32_t shift = 0;
    IRRef idxref;
    /* Determine index shifts. Don't bother with IR_MUL here. */
    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
      shift = IR(ir1->op2)->i;
    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
      shift = 1;  /* x+x is a shift by 1. */
    else
      ir1 = ir;  /* No scaling: treat the whole add as the index expr. */
    ir2 = IR(ir1->op1);
    /* A non-reassociated add. Must be a loop-carried dependence. */
    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;  /* Fold scaled increment. */
    else
      return 0;
    idxref = ir2->op1;
    /* Try to CSE the reassociated chain. Give up if not found. */
    if (ir1 != ir &&
        !(idxref = reassoc_trycse(J, ir1->o, idxref,
                                  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
      return 0;
    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
      return 0;
    if (ofs != 0) {
      /* NOTE: lj_ir_kintp may intern a new constant, but constants are
      ** placed below the search limits, so this is harmless.
      */
      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
        return 0;
    }
    return idxref;  /* Success, found a reassociated index reference. Phew. */
  }
  return 0;  /* Failure. */
}
742

743
/* XLOAD forwarding. */
/* Try to satisfy an XLOAD (fins) without emitting it:
** - forward the value of a must-aliasing XSTORE (S2L), inserting a CONV
**   when the stored and loaded types differ;
** - CSE against an earlier XLOAD of the same address and type (L2L);
** - otherwise reassociate the address (reassoc_xref) and retry once
**   (the xref == fins->op1 guard prevents a second retry).
** Returns a forwarded/CSEd ref, RETRYFOLD or EMITFOLD.
*/
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Read-only loads can never conflict with stores: go straight to CSE. */
  if ((fins->op2 & IRXLOAD_READONLY))
    goto cselim;
  /* Volatile loads must never be forwarded or CSEd. */
  if ((fins->op2 & IRXLOAD_VOLATILE))
    goto doemit;

  /* Search for conflicting stores. */
  ref = J->chain[IR_XSTORE];
retry:
  /* Calls and barriers clobber arbitrary memory: limit the search. */
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST:
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
        IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
        if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
          st = dt | IRCONV_SEXT;
          dt = IRT_INT;
        } else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
          st = dt;
          dt = IRT_INT;
        }
        /* Rewrite fins in place into a CONV and let fold retry on it. */
        fins->ot = IRT(IR_CONV, dt);
        fins->op1 = store->op2;
        fins->op2 = (dt<<5)|st;  /* Pack dest/src types into CONV's op2. */
        return RETRYFOLD;
      }
      return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_XLOAD];
  while (ref > lim) {
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
      return ref;
    ref = IR(ref)->prev;
  }

  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
    ref = J->chain[IR_XSTORE];
    while (ref > lim)  /* Skip stores that have already been checked. */
      ref = IR(ref)->prev;
    lim = xref;
    xr = IR(xref);
    goto retry;  /* Retry with the reassociated reference. */
  }
doemit:
  return EMITFOLD;
}
810

811
/* XSTORE elimination. */
/* DSE for XSTORE (fins): drop the new store when a must-aliasing earlier
** store already wrote the same value; otherwise try to NOP out an earlier
** must-aliasing store that is provably dead -- only when it is not
** separated from the current position by the LOOP marker, any guard or
** any XLOAD (no alias analysis is done on intervening loads).
** Returns DROPFOLD or EMITFOLD.
*/
TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRRef1 *refp = &J->chain[IR_XSTORE];  /* Chain link to patch on removal. */
  IRRef ref = *refp;
  /* Calls, barriers and string allocations limit the search. */
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
  while (ref > lim) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or any XLOADs (no AA performed). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);  /* Track the link for possible unlinking. */
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
857

858
/* -- Forwarding of lj_tab_len -------------------------------------------- */
859

860
/* This is rather simplistic right now, but better than nothing. */
861
TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
80✔
862
{
863
  IRRef tab = fins->op1;  /* Table reference. */
80✔
864
  IRRef lim = tab;  /* Search limit. */
80✔
865
  IRRef ref;
80✔
866

867
  /* Any ASTORE is a conflict and limits the search. */
868
  if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
80✔
869

870
  /* Search for conflicting HSTORE with numeric key. */
871
  ref = J->chain[IR_HSTORE];
80✔
872
  while (ref > lim) {
82✔
873
    IRIns *store = IR(ref);
2✔
874
    IRIns *href = IR(store->op1);
2✔
875
    IRIns *key = IR(href->op2);
2✔
876
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
2✔
877
      lim = ref;  /* Conflicting store found, limits search for TLEN. */
878
      break;
879
    }
880
    ref = store->prev;
2✔
881
  }
882

883
  /* Search for aliasing table.clear. */
884
  if (!fwd_aa_tab_clear(J, lim, tab))
80✔
885
    return lj_ir_emit(J);
×
886

887
  /* Try to find a matching load. Below the conflicting store, if any. */
888
  return lj_opt_cselim(J, lim);
80✔
889
}
890

891
/* -- ASTORE/HSTORE previous type analysis -------------------------------- */

/* Check whether the previous value for a table store is non-nil.
** This can be derived either from a previous store or from a previous
** load (because all loads from tables perform a type check).
**
** The result of the analysis can be used to avoid the metatable check
** and the guard against HREF returning niltv. Both of these are cheap,
** so let's not spend too much effort on the analysis.
**
** A result of 1 is exact: the previous value CANNOT be nil.
** A result of 0 is inexact: the previous value MAY be nil.
*/
904
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
{
  IRRef ref;
  /* Stores take precedence: scan the matching store chain first. */
  for (ref = J->chain[loadop+IRDELTA_L2S]; ref > xref; ref = IR(ref)->prev) {
    IRIns *st = IR(ref);
    if (st->op1 == xref) {
      /* Store to the very same xREF: nil MAY alias, non-nil MUST alias. */
      return !irt_isnil(st->t);
    }
    if (!irt_isnil(st->t))
      continue;  /* A non-nil store elsewhere MAY alias: keep scanning. */
    {
      /* A nil store to a different ref: disambiguate via the keys. */
      IRRef skey = IR(st->op1)->op2;
      IRRef xkey = IR(xref)->op2;
      /* Different key types CANNOT alias. ALOAD is exempt from this
      ** check because multiple int types are in play there.
      */
      if (loadop != IR_ALOAD && !irt_sametype(IR(skey)->t, IR(xkey)->t))
        continue;
      /* Distinct constant keys CANNOT alias either. */
      if (skey != xkey && irref_isk(skey) && irref_isk(xkey))
        continue;
      return 0;  /* Same const key or a variable key MAY alias. */
    }
  }

  /* Nothing derived from stores: fall back to previous loads, which are
  ** usable because all loads from tables perform a type check.
  */
  for (ref = J->chain[loadop]; ref > xref; ref = IR(ref)->prev) {
    IRIns *ld = IR(ref);
    if (ld->op1 == xref)
      return !irt_isnil(ld->t);  /* Nil load MAY, non-nil load MUST alias. */
  }
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
}
938

939
/* ------------------------------------------------------------------------ */

/* Undefine the file-local shorthand macros (presumably defined near the
** top of this file -- not visible in this chunk) so they don't leak.
*/
#undef IR
#undef fins
#undef fleft
#undef fright

#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc