tarantool / luajit, build 6312339805 (github push)

26 Sep 2023 11:32AM UTC, coverage: 88.208% (+0.01%) from 88.198%

igormunkin
Handle table unsinking in the presence of IRFL_TAB_NOMM.

Reported by Sergey Kaplun.

(cherry-picked from commit 0ef51b495)

Table `NEWREF` storage for non-constant keys also emits `FREF` IR with
`IRFL_TAB_NOMM` to invalidate the metamethod cache. When table creation
and `NEWREF` are sunk, the corresponding `FSTORE` is sunk too and should
be restored on trace exit. However, `snap_unsink()` expects only
`IRFL_TAB_META` as the second operand of `FREF`, so the corresponding
assertion fails.

This patch adds a switch-case statement to handle the `IRFL_TAB_NOMM`
case. Since `FREF` with `IRFL_TAB_NOMM` always follows some hash store,
duplicating the cache invalidation is unnecessary, so this case simply
does nothing.
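
For illustration only, a minimal sketch of how such a switch on the
`FREF` field operand could look inside `snap_unsink()`. The variable
name (`irr` for the `FREF` instruction) and the surrounding structure
are assumptions made for this sketch, not the actual patch:

switch (irr->op2) {
case IRFL_TAB_META:
  /* ... existing logic that restores the sunk metatable store ... */
  break;
case IRFL_TAB_NOMM:
  /* The preceding hash store already invalidates the metamethod
  ** cache, so restoring this FSTORE would only duplicate that work.
  */
  break;
default:
  lj_assertJ(0, "unexpected FREF field %d", irr->op2);
  break;
}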

Sergey Kaplun:
* added the description and the test for the problem

Part of tarantool/tarantool#8825

Reviewed-by: Maxim Kokryashkin <m.kokryashkin@tarantool.org>
Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Signed-off-by: Igor Munkin <imun@tarantool.org>

5337 of 5972 branches covered (89.37%)

Branch coverage included in aggregate %.

20477 of 23293 relevant lines covered (87.91%)

2732866.64 hits per line

Source File: /src/lj_opt_mem.c (95.91% covered)
1
/*
2
** Memory access optimizations.
3
** AA: Alias Analysis using high-level semantic disambiguation.
4
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
5
** DSE: Dead-Store Elimination.
6
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
7
*/
8

9
#define lj_opt_mem_c
10
#define LUA_CORE
11

12
#include "lj_obj.h"
13

14
#if LJ_HASJIT
15

16
#include "lj_tab.h"
17
#include "lj_ir.h"
18
#include "lj_jit.h"
19
#include "lj_iropt.h"
20
#include "lj_ircall.h"
21
#include "lj_dispatch.h"
22

23
/* Some local macros to save typing. Undef'd at the end. */
24
#define IR(ref)                (&J->cur.ir[(ref)])
25
#define fins                (&J->fold.ins)
26
#define fleft                (J->fold.left)
27
#define fright                (J->fold.right)
28

29
/*
30
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
31
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
32
*/
33

34
/* Return values from alias analysis. */
35
typedef enum {
36
  ALIAS_NO,        /* The two refs CANNOT alias (exact). */
37
  ALIAS_MAY,        /* The two refs MAY alias (inexact). */
38
  ALIAS_MUST        /* The two refs MUST alias (exact). */
39
} AliasRet;
40

41
/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
42

43
/* Simplified escape analysis: check for intervening stores. */
44
static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
62✔
45
{
46
  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
62✔
47
  for (ir++; ir < stop; ir++)
62✔
48
    if (ir->op2 == ref &&
×
49
        (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
×
50
         ir->o == IR_USTORE || ir->o == IR_FSTORE))
×
51
      return ALIAS_MAY;  /* Reference was stored and might alias. */
52
  return ALIAS_NO;  /* Reference was not stored. */
53
}
54

55
/* Alias analysis for two different table references. */
56
static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
57
{
58
  IRIns *taba = IR(ta), *tabb = IR(tb);
59
  int newa, newb;
60
  lj_assertJ(ta != tb, "bad usage");
61
  lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
62
  /* Disambiguate new allocations. */
63
  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
64
  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
65
  if (newa && newb)
66
    return ALIAS_NO;  /* Two different allocations never alias. */
67
  if (newb) {  /* At least one allocation? */
68
    IRIns *tmp = taba; taba = tabb; tabb = tmp;
69
  } else if (!newa) {
70
    return ALIAS_MAY;  /* Anything else: we just don't know. */
71
  }
72
  return aa_escape(J, taba, tabb);
73
}
74

75
/* Alias analysis for array and hash access using key-based disambiguation. */
76
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
77
{
78
  IRRef ka = refa->op2;
79
  IRRef kb = refb->op2;
80
  IRIns *keya, *keyb;
81
  IRRef ta, tb;
82
  if (refa == refb)
83
    return ALIAS_MUST;  /* Shortcut for same refs. */
84
  keya = IR(ka);
85
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
86
  keyb = IR(kb);
87
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
88
  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
89
  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
90
  if (ka == kb) {
91
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
92
    if (ta == tb)
93
      return ALIAS_MUST;  /* Same key, same table. */
94
    else
95
      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
96
  }
97
  if (irref_isk(ka) && irref_isk(kb))
98
    return ALIAS_NO;  /* Different constant keys. */
99
  if (refa->o == IR_AREF) {
100
    /* Disambiguate array references based on index arithmetic. */
101
    int32_t ofsa = 0, ofsb = 0;
102
    IRRef basea = ka, baseb = kb;
103
    lj_assertJ(refb->o == IR_AREF, "expected AREF");
104
    /* Gather base and offset from t[base] or t[base+-ofs]. */
105
    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
106
      basea = keya->op1;
107
      ofsa = IR(keya->op2)->i;
108
      if (basea == kb && ofsa != 0)
109
        return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
110
    }
111
    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
112
      baseb = keyb->op1;
113
      ofsb = IR(keyb->op2)->i;
114
      if (ka == baseb && ofsb != 0)
115
        return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
116
    }
117
    if (basea == baseb && ofsa != ofsb)
118
      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
119
  } else {
120
    /* Disambiguate hash references based on the type of their keys. */
121
    lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
122
               (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
123
               "bad xREF IR op %d or %d", refa->o, refb->o);
124
    if (!irt_sametype(keya->t, keyb->t))
125
      return ALIAS_NO;  /* Different key types. */
126
  }
127
  if (ta == tb)
128
    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
129
  else
130
    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
131
}
132

133
/* Array and hash load forwarding. */
134
static TRef fwd_ahload(jit_State *J, IRRef xref)
9,647✔
135
{
136
  IRIns *xr = IR(xref);
9,647✔
137
  IRRef lim = xref;  /* Search limit. */
9,647✔
138
  IRRef ref;
9,647✔
139

140
  /* Search for conflicting stores. */
141
  ref = J->chain[fins->o+IRDELTA_L2S];
9,647✔
142
  while (ref > xref) {
10,542✔
143
    IRIns *store = IR(ref);
3,767✔
144
    switch (aa_ahref(J, xr, IR(store->op1))) {
3,767✔
145
    case ALIAS_NO:   break;  /* Continue searching. */
146
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
351✔
147
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
2,521✔
148
    }
149
    ref = store->prev;
895✔
150
  }
151

152
  /* No conflicting store (yet): const-fold loads from allocations. */
153
  {
154
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
6,775✔
155
    IRRef tab = ir->op1;
6,775✔
156
    ir = IR(tab);
6,775✔
157
    if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
6,775✔
158
      /* A NEWREF with a number key may end up pointing to the array part.
159
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
160
      ** Or a NEWREF may rehash the table and move unrelated number keys.
161
      ** For now simply consider this a conflict without forwarding anything.
162
      */
163
      if (xr->o == IR_AREF) {
57✔
164
        IRRef ref2 = J->chain[IR_NEWREF];
25✔
165
        while (ref2 > tab) {
25✔
166
          IRIns *newref = IR(ref2);
19✔
167
          if (irt_isnum(IR(newref->op2)->t))
19✔
168
            goto cselim;
19✔
169
          ref2 = newref->prev;
×
170
        }
171
      } else {
172
        IRIns *key = IR(xr->op2);
32✔
173
        if (key->o == IR_KSLOT) key = IR(key->op1);
32✔
174
        if (irt_isnum(key->t) && J->chain[IR_NEWREF] > tab)
32✔
175
          goto cselim;
12✔
176
      }
177
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
178
      ** But the above search for conflicting stores was limited by xref.
179
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
180
      ** is ok, too. A conflict does NOT limit the search for a matching load.
181
      */
182
      while (ref > tab) {
80✔
183
        IRIns *store = IR(ref);
70✔
184
        switch (aa_ahref(J, xr, IR(store->op1))) {
70✔
185
        case ALIAS_NO:   break;  /* Continue searching. */
186
        case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
2✔
187
        case ALIAS_MUST: return store->op2;  /* Store forwarding. */
14✔
188
        }
189
        ref = store->prev;
54✔
190
      }
191
      if (ir->o == IR_TNEW && !irt_isnil(fins->t))
10✔
192
        return 0;  /* Type instability in loop-carried dependency. */
193
      if (irt_ispri(fins->t)) {
9✔
194
        return TREF_PRI(irt_type(fins->t));
×
195
      } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
9✔
196
                 irt_isstr(fins->t)) {
197
        TValue keyv;
9✔
198
        cTValue *tv;
9✔
199
        IRIns *key = IR(xr->op2);
9✔
200
        if (key->o == IR_KSLOT) key = IR(key->op1);
9✔
201
        lj_ir_kvalue(J->L, &keyv, key);
9✔
202
        tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
9✔
203
        if (itype2irt(tv) != irt_type(fins->t))
12✔
204
          return 0;  /* Type instability in loop-carried dependency. */
205
        if (irt_isnum(fins->t))
8✔
206
          return lj_ir_knum_u64(J, tv->u64);
6✔
207
        else if (LJ_DUALNUM && irt_isint(fins->t))
2✔
208
          return lj_ir_kint(J, intV(tv));
209
        else
210
          return lj_ir_kstr(J, strV(tv));
2✔
211
      }
212
      /* Othwerwise: don't intern as a constant. */
213
    }
214
  }
215

216
cselim:
6,718✔
217
  /* Try to find a matching load. Below the conflicting store, if any. */
218
  ref = J->chain[fins->o];
7,102✔
219
  while (ref > lim) {
8,938✔
220
    IRIns *load = IR(ref);
3,442✔
221
    if (load->op1 == xref)
3,442✔
222
      return ref;  /* Load forwarding. */
1,606✔
223
    ref = load->prev;
1,836✔
224
  }
225
  return 0;  /* Conflict or no match. */
226
}
227

228
/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
229
static TRef fwd_aload_reassoc(jit_State *J)
689✔
230
{
231
  IRIns *irx = IR(fins->op1);
689✔
232
  IRIns *key = IR(irx->op2);
689✔
233
  if (key->o == IR_ADD && irref_isk(key->op2)) {
689✔
234
    IRIns *add2 = IR(key->op1);
253✔
235
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
253✔
236
        IR(key->op2)->i == -IR(add2->op2)->i) {
22✔
237
      IRRef ref = J->chain[IR_AREF];
3✔
238
      IRRef lim = add2->op1;
3✔
239
      if (irx->op1 > lim) lim = irx->op1;
3✔
240
      while (ref > lim) {
7✔
241
        IRIns *ir = IR(ref);
7✔
242
        if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
7✔
243
          return fwd_ahload(J, ref);
3✔
244
        ref = ir->prev;
4✔
245
      }
246
    }
247
  }
248
  return 0;
249
}
250

251
/* ALOAD forwarding. */
252
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
2,524✔
253
{
254
  IRRef ref;
2,524✔
255
  if ((ref = fwd_ahload(J, fins->op1)) ||
2,524✔
256
      (ref = fwd_aload_reassoc(J)))
689✔
257
    return ref;
1,838✔
258
  return EMITFOLD;
686✔
259
}
260

261
/* HLOAD forwarding. */
262
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
7,120✔
263
{
264
  IRRef ref = fwd_ahload(J, fins->op1);
7,120✔
265
  if (ref)
7,120✔
266
    return ref;
267
  return EMITFOLD;
4,809✔
268
}
269

270
/* HREFK forwarding. */
271
TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
7,673✔
272
{
273
  IRRef tab = fleft->op1;
7,673✔
274
  IRRef ref = J->chain[IR_NEWREF];
7,673✔
275
  while (ref > tab) {
7,685✔
276
    IRIns *newref = IR(ref);
593✔
277
    if (tab == newref->op1) {
593✔
278
      if (fright->op1 == newref->op2)
303✔
279
        return ref;  /* Forward from NEWREF. */
280
      else
281
        goto docse;
240✔
282
    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
290✔
283
      goto docse;
278✔
284
    }
285
    ref = newref->prev;
12✔
286
  }
287
  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
288
  if (IR(tab)->o == IR_TDUP)
7,092✔
289
    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
121✔
290
docse:
6,971✔
291
  return CSEFOLD;
7,610✔
292
}
293

294
/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
295
int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
18✔
296
{
297
  IRRef lim = fins->op1;  /* Search limit. */
18✔
298
  IRRef ref;
18✔
299

300
  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
301
  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
18✔
302
    ref = J->chain[IR_ASTORE];
4✔
303
    while (ref > lim) {
4✔
304
      if (ref < J->chain[IR_NEWREF])
4✔
305
        return 0;  /* Conflict. */
306
      ref = IR(ref)->prev;
×
307
    }
308
  }
309

310
  /* Search for conflicting stores. */
311
  ref = J->chain[IR_HSTORE];
14✔
312
  while (ref > lim) {
35✔
313
    IRIns *store = IR(ref);
21✔
314
    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
21✔
315
      return 0;  /* Conflict. */
316
    ref = store->prev;
21✔
317
  }
318

319
  return 1;  /* No conflict. Can fold to niltv. */
320
}
321

322
/* Check whether there's no aliasing table.clear. */
323
static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
324
{
325
  IRRef ref = J->chain[IR_CALLS];
326
  while (ref > lim) {
327
    IRIns *calls = IR(ref);
328
    if (calls->op2 == IRCALL_lj_tab_clear &&
329
        (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
330
      return 0;  /* Conflict. */
331
    ref = calls->prev;
332
  }
333
  return 1;  /* No conflict. Can safely FOLD/CSE. */
334
}
335

336
/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */
337
int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
28,977✔
338
{
339
  IRRef ta = fins->op1;
28,977✔
340
  IRRef ref = J->chain[IR_NEWREF];
28,977✔
341
  while (ref > lim) {
28,993✔
342
    IRIns *newref = IR(ref);
1,116✔
343
    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
1,116✔
344
      return 0;  /* Conflict. */
345
    ref = newref->prev;
16✔
346
  }
347
  return fwd_aa_tab_clear(J, lim, ta);
27,877✔
348
}
349

350
/* ASTORE/HSTORE elimination. */
351
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
4,843✔
352
{
353
  IRRef xref = fins->op1;  /* xREF reference. */
4,843✔
354
  IRRef val = fins->op2;  /* Stored value reference. */
4,843✔
355
  IRIns *xr = IR(xref);
4,843✔
356
  IRRef1 *refp = &J->chain[fins->o];
4,843✔
357
  IRRef ref = *refp;
4,843✔
358
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
4,995✔
359
    IRIns *store = IR(ref);
2,036✔
360
    switch (aa_ahref(J, xr, IR(store->op1))) {
2,036✔
361
    case ALIAS_NO:
362
      break;  /* Continue searching. */
363
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
54✔
364
      if (store->op2 != val)  /* Conflict if the value is different. */
54✔
365
        goto doemit;
52✔
366
      break;  /* Otherwise continue searching. */
367
    case ALIAS_MUST:        /* Store to the same location. */
1,832✔
368
      if (store->op2 == val)  /* Same value: drop the new store. */
1,832✔
369
        return DROPFOLD;
370
      /* Different value: try to eliminate the redundant store. */
371
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
1,809✔
372
        IRIns *ir;
1,789✔
373
        /* Check for any intervening guards (includes conflicting loads). */
374
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
3,562✔
375
          if (irt_isguard(ir->t) || ir->o == IR_CALLL)
1,826✔
376
            goto doemit;  /* No elimination possible. */
53✔
377
        /* Remove redundant store from chain and replace with NOP. */
378
        *refp = store->prev;
1,736✔
379
        store->o = IR_NOP;
1,736✔
380
        store->t.irt = IRT_NIL;
1,736✔
381
        store->op1 = store->op2 = 0;
1,736✔
382
        store->prev = 0;
1,736✔
383
        /* Now emit the new store instead. */
384
      }
385
      goto doemit;
1,756✔
386
    }
387
    ref = *(refp = &store->prev);
152✔
388
  }
389
doemit:
2,959✔
390
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
4,820✔
391
}
392

393
/* -- ULOAD forwarding ---------------------------------------------------- */
394

395
/* The current alias analysis for upvalues is very simplistic. It only
396
** disambiguates between the unique upvalues of the same function.
397
** This is good enough for now, since most upvalues are read-only.
398
**
399
** A more precise analysis would be feasible with the help of the parser:
400
** generate a unique key for every upvalue, even across all prototypes.
401
** Lacking a realistic use-case, it's unclear whether this is beneficial.
402
*/
403
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
73✔
404
{
405
  if (refa->o != refb->o)
73✔
406
    return ALIAS_NO;  /* Different UREFx type. */
407
  if (refa->op1 == refb->op1) {  /* Same function. */
69✔
408
    if (refa->op2 == refb->op2)
56✔
409
      return ALIAS_MUST;  /* Same function, same upvalue idx. */
410
    else
411
      return ALIAS_NO;  /* Same function, different upvalue idx. */
29✔
412
  } else {  /* Different functions, check disambiguation hash values. */
413
    if (((refa->op2 ^ refb->op2) & 0xff))
13✔
414
      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
415
    else
416
      return ALIAS_MAY;  /* No conclusion can be drawn for same hash value. */
10✔
417
  }
418
}
419

420
/* ULOAD forwarding. */
421
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
475✔
422
{
423
  IRRef uref = fins->op1;
475✔
424
  IRRef lim = REF_BASE;  /* Search limit. */
475✔
425
  IRIns *xr = IR(uref);
475✔
426
  IRRef ref;
475✔
427

428
  /* Search for conflicting stores. */
429
  ref = J->chain[IR_USTORE];
475✔
430
  while (ref > lim) {
511✔
431
    IRIns *store = IR(ref);
58✔
432
    switch (aa_uref(xr, IR(store->op1))) {
112✔
433
    case ALIAS_NO:   break;  /* Continue searching. */
434
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
8✔
435
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
14✔
436
    }
437
    ref = store->prev;
36✔
438
  }
439

440
cselim:
453✔
441
  /* Try to find a matching load. Below the conflicting store, if any. */
442

443
  ref = J->chain[IR_ULOAD];
461✔
444
  while (ref > lim) {
684✔
445
    IRIns *ir = IR(ref);
332✔
446
    if (ir->op1 == uref ||
332✔
447
        (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
229✔
448
      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
109✔
449
    ref = ir->prev;
223✔
450
  }
451
  return lj_ir_emit(J);
352✔
452
}
453

454
/* USTORE elimination. */
455
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
58✔
456
{
457
  IRRef xref = fins->op1;  /* xREF reference. */
58✔
458
  IRRef val = fins->op2;  /* Stored value reference. */
58✔
459
  IRIns *xr = IR(xref);
58✔
460
  IRRef1 *refp = &J->chain[IR_USTORE];
58✔
461
  IRRef ref = *refp;
58✔
462
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
58✔
463
    IRIns *store = IR(ref);
15✔
464
    switch (aa_uref(xr, IR(store->op1))) {
30✔
465
    case ALIAS_NO:
466
      break;  /* Continue searching. */
467
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
2✔
468
      if (store->op2 != val)  /* Conflict if the value is different. */
2✔
469
        goto doemit;
2✔
470
      break;  /* Otherwise continue searching. */
471
    case ALIAS_MUST:        /* Store to the same location. */
13✔
472
      if (store->op2 == val)  /* Same value: drop the new store. */
13✔
473
        return DROPFOLD;
474
      /* Different value: try to eliminate the redundant store. */
475
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
12✔
476
        IRIns *ir;
7✔
477
        /* Check for any intervening guards (includes conflicting loads). */
478
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
13✔
479
          if (irt_isguard(ir->t))
8✔
480
            goto doemit;  /* No elimination possible. */
2✔
481
        /* Remove redundant store from chain and replace with NOP. */
482
        *refp = store->prev;
5✔
483
        store->o = IR_NOP;
5✔
484
        store->t.irt = IRT_NIL;
5✔
485
        store->op1 = store->op2 = 0;
5✔
486
        store->prev = 0;
5✔
487
        if (ref+1 < J->cur.nins &&
5✔
488
            store[1].o == IR_OBAR && store[1].op1 == xref) {
5✔
489
          IRRef1 *bp = &J->chain[IR_OBAR];
×
490
          IRIns *obar;
×
491
          for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
×
492
            bp = &obar->prev;
×
493
          /* Remove OBAR, too. */
494
          *bp = obar->prev;
×
495
          obar->o = IR_NOP;
×
496
          obar->t.irt = IRT_NIL;
×
497
          obar->op1 = obar->op2 = 0;
×
498
          obar->prev = 0;
×
499
        }
500
        /* Now emit the new store instead. */
501
      }
502
      goto doemit;
10✔
503
    }
504
    ref = *(refp = &store->prev);
×
505
  }
506
doemit:
43✔
507
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
57✔
508
}
509

510
/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
511

512
/* Alias analysis for field access.
513
** Field loads are cheap and field stores are rare.
514
** Simple disambiguation based on field types is good enough.
515
*/
516
static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
57✔
517
{
518
  if (refa->op2 != refb->op2)
57✔
519
    return ALIAS_NO;  /* Different fields. */
520
  if (refa->op1 == refb->op1)
52✔
521
    return ALIAS_MUST;  /* Same field, same object. */
522
  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
14✔
523
    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
14✔
524
  else
525
    return ALIAS_MAY;  /* Same field, possibly different object. */
526
}
527

528
/* Only the loads for mutable fields end up here (see FOLD). */
529
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
3,711✔
530
{
531
  IRRef oref = fins->op1;  /* Object reference. */
3,711✔
532
  IRRef fid = fins->op2;  /* Field ID. */
3,711✔
533
  IRRef lim = oref;  /* Search limit. */
3,711✔
534
  IRRef ref;
3,711✔
535

536
  /* Search for conflicting stores. */
537
  ref = J->chain[IR_FSTORE];
3,711✔
538
  while (ref > oref) {
3,720✔
539
    IRIns *store = IR(ref);
37✔
540
    switch (aa_fref(J, fins, IR(store->op1))) {
47✔
541
    case ALIAS_NO:   break;  /* Continue searching. */
542
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
6✔
543
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
22✔
544
    }
545
    ref = store->prev;
9✔
546
  }
547

548
  /* No conflicting store: const-fold field loads from allocations. */
549
  if (fid == IRFL_TAB_META) {
3,683✔
550
    IRIns *ir = IR(oref);
3,250✔
551
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
3,250✔
552
      return lj_ir_knull(J, IRT_TAB);
265✔
553
  }
554

555
cselim:
3,418✔
556
  /* Try to find a matching load. Below the conflicting store, if any. */
557
  return lj_opt_cselim(J, lim);
3,424✔
558
}
559

560
/* FSTORE elimination. */
561
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
73✔
562
{
563
  IRRef fref = fins->op1;  /* FREF reference. */
73✔
564
  IRRef val = fins->op2;  /* Stored value reference. */
73✔
565
  IRIns *xr = IR(fref);
73✔
566
  IRRef1 *refp = &J->chain[IR_FSTORE];
73✔
567
  IRRef ref = *refp;
73✔
568
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
75✔
569
    IRIns *store = IR(ref);
20✔
570
    switch (aa_fref(J, xr, IR(store->op1))) {
24✔
571
    case ALIAS_NO:
572
      break;  /* Continue searching. */
573
    case ALIAS_MAY:
4✔
574
      if (store->op2 != val)  /* Conflict if the value is different. */
4✔
575
        goto doemit;
2✔
576
      break;  /* Otherwise continue searching. */
577
    case ALIAS_MUST:
16✔
578
      if (store->op2 == val)  /* Same value: drop the new store. */
16✔
579
        return DROPFOLD;
580
      /* Different value: try to eliminate the redundant store. */
581
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
7✔
582
        IRIns *ir;
4✔
583
        /* Check for any intervening guards or conflicting loads. */
584
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
4✔
585
          if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
3✔
586
            goto doemit;  /* No elimination possible. */
3✔
587
        /* Remove redundant store from chain and replace with NOP. */
588
        *refp = store->prev;
1✔
589
        store->o = IR_NOP;
1✔
590
        store->t.irt = IRT_NIL;
1✔
591
        store->op1 = store->op2 = 0;
1✔
592
        store->prev = 0;
1✔
593
        /* Now emit the new store instead. */
594
      }
595
      goto doemit;
4✔
596
    }
597
    ref = *(refp = &store->prev);
2✔
598
  }
599
doemit:
55✔
600
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
64✔
601
}
602

603
/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
604

605
/* Find cdata allocation for a reference (if any). */
606
static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
206✔
607
{
608
  while (ir->o == IR_ADD) {
222✔
609
    if (!irref_isk(ir->op1)) {
16✔
610
      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
16✔
611
      if (ir1) return ir1;
16✔
612
    }
613
    if (irref_isk(ir->op2)) return NULL;
16✔
614
    ir = IR(ir->op2);  /* Flatten right-recursion. */
16✔
615
  }
616
  return ir->o == IR_CNEW ? ir : NULL;
206✔
617
}
618

619
/* Alias analysis for two cdata allocations. */
620
static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
95✔
621
{
622
  IRIns *cnewa = aa_findcnew(J, refa);
95✔
623
  IRIns *cnewb = aa_findcnew(J, refb);
95✔
624
  if (cnewa == cnewb)
95✔
625
    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
626
  if (cnewa && cnewb)
64✔
627
    return ALIAS_NO;  /* Two different allocations never alias. */
628
  if (cnewb) { cnewa = cnewb; refb = refa; }
62✔
629
  return aa_escape(J, cnewa, refb);
62✔
630
}
631

632
/* Alias analysis for XLOAD/XSTORE. */
633
static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
634
{
635
  ptrdiff_t ofsa = 0, ofsb = 0;
636
  IRIns *refb = IR(xb->op1);
637
  IRIns *basea = refa, *baseb = refb;
638
  if (refa == refb && irt_sametype(xa->t, xb->t))
639
    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
640
  /* Offset-based disambiguation. */
641
  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
642
    IRIns *irk = IR(refa->op2);
643
    basea = IR(refa->op1);
644
    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
645
                                            (ptrdiff_t)irk->i;
646
  }
647
  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
648
    IRIns *irk = IR(refb->op2);
649
    baseb = IR(refb->op1);
650
    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
651
                                            (ptrdiff_t)irk->i;
652
  }
653
  /* Treat constified pointers like base vs. base+offset. */
654
  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
655
    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
656
    baseb = basea;
657
  }
658
  /* This implements (very) strict aliasing rules.
659
  ** Different types do NOT alias, except for differences in signedness.
660
  ** Type punning through unions is allowed (but forces a reload).
661
  */
662
  if (basea == baseb) {
663
    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
664
    if (ofsa == ofsb) {
665
      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
666
        return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
667
    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
668
      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
669
    }
670
    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
671
    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
672
  }
673
  if (!irt_sametype(xa->t, xb->t) &&
674
      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
675
        ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
676
    return ALIAS_NO;
677
  /* NYI: structural disambiguation. */
678
  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
679
}
680

681
/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
682
static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
202✔
683
{
684
  IRRef ref = J->chain[op];
202✔
685
  IRRef lim = op1;
202✔
686
  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
17✔
687
  while (ref > lim) {
1,191✔
688
    IRIns *ir = IR(ref);
1,128✔
689
    if (ir->op1 == op1 && ir->op2 == op2)
1,128✔
690
      return ref;
691
    ref = ir->prev;
989✔
692
  }
693
  return 0;
694
}
695

696
/* Reassociate index references. */
697
static IRRef reassoc_xref(jit_State *J, IRIns *ir)
129✔
698
{
699
  ptrdiff_t ofs = 0;
129✔
700
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
129✔
701
    IRIns *irk = IR(ir->op2);
111✔
702
    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
111✔
703
                                           (ptrdiff_t)irk->i;
×
704
    ir = IR(ir->op1);
111✔
705
  }
706
  if (ir->o == IR_ADD) {  /* Add of base + index. */
129✔
707
    /* Index ref > base ref for loop-carried dependences. Only check op1. */
708
    IRIns *ir2, *ir1 = IR(ir->op1);
78✔
709
    int32_t shift = 0;
78✔
710
    IRRef idxref;
78✔
711
    /* Determine index shifts. Don't bother with IR_MUL here. */
712
    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
78✔
713
      shift = IR(ir1->op2)->i;
50✔
714
    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
28✔
715
      shift = 1;
716
    else
717
      ir1 = ir;
20✔
718
    ir2 = IR(ir1->op1);
78✔
719
    /* A non-reassociated add. Must be a loop-carried dependence. */
720
    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
78✔
721
      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
73✔
722
    else
723
      return 0;
724
    idxref = ir2->op1;
73✔
725
    /* Try to CSE the reassociated chain. Give up if not found. */
726
    if (ir1 != ir &&
73✔
727
        !(idxref = reassoc_trycse(J, ir1->o, idxref,
112✔
728
                                  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
56✔
729
      return 0;
730
    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
146✔
731
      return 0;
732
    if (ofs != 0) {
73✔
733
      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
73✔
734
      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
146✔
735
        return 0;
736
    }
737
    return idxref;  /* Success, found a reassociated index reference. Phew. */
10✔
738
  }
739
  return 0;  /* Failure. */
740
}
741

742
/* XLOAD forwarding. */
743
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
558✔
744
{
745
  IRRef xref = fins->op1;
558✔
746
  IRIns *xr = IR(xref);
558✔
747
  IRRef lim = xref;  /* Search limit. */
558✔
748
  IRRef ref;
558✔
749

750
  if ((fins->op2 & IRXLOAD_READONLY))
558✔
751
    goto cselim;
159✔
752
  if ((fins->op2 & IRXLOAD_VOLATILE))
399✔
753
    goto doemit;
×
754

755
  /* Search for conflicting stores. */
756
  ref = J->chain[IR_XSTORE];
399✔
757
retry:
409✔
758
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
409✔
759
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
409✔
760
  while (ref > lim) {
548✔
761
    IRIns *store = IR(ref);
258✔
762
    switch (aa_xref(J, xr, fins, store)) {
258✔
763
    case ALIAS_NO:   break;  /* Continue searching. */
764
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
45✔
765
    case ALIAS_MUST:
74✔
766
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
767
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
74✔
768
        IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
9✔
769
        if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
9✔
770
          st = dt | IRCONV_SEXT;
2✔
771
          dt = IRT_INT;
2✔
772
        } else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
7✔
773
          st = dt;
1✔
774
          dt = IRT_INT;
1✔
775
        }
776
        fins->ot = IRT(IR_CONV, dt);
9✔
777
        fins->op1 = store->op2;
9✔
778
        fins->op2 = (dt<<5)|st;
9✔
779
        return RETRYFOLD;
9✔
780
      }
781
      return store->op2;  /* Store forwarding. */
65✔
782
    }
783
    ref = store->prev;
139✔
784
  }
785

786
cselim:
290✔
787
  /* Try to find a matching load. Below the conflicting store, if any. */
788
  ref = J->chain[IR_XLOAD];
494✔
789
  while (ref > lim) {
565✔
790
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
791
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
127✔
792
      return ref;
56✔
793
    ref = IR(ref)->prev;
71✔
794
  }
795

796
  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
797
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
438✔
798
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
129✔
799
    ref = J->chain[IR_XSTORE];
10✔
800
    while (ref > lim)  /* Skip stores that have already been checked. */
10✔
801
      ref = IR(ref)->prev;
×
802
    lim = xref;
10✔
803
    xr = IR(xref);
10✔
804
    goto retry;  /* Retry with the reassociated reference. */
10✔
805
  }
806
doemit:
428✔
807
  return EMITFOLD;
428✔
808
}
809

810
/* XSTORE elimination. */
811
TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
414✔
812
{
813
  IRRef xref = fins->op1;
414✔
814
  IRIns *xr = IR(xref);
414✔
815
  IRRef lim = xref;  /* Search limit. */
414✔
816
  IRRef val = fins->op2;  /* Stored value reference. */
414✔
817
  IRRef1 *refp = &J->chain[IR_XSTORE];
414✔
818
  IRRef ref = *refp;
414✔
819
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
414✔
820
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
414✔
821
  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
414✔
822
  while (ref > lim) {  /* Search for redundant or conflicting stores. */
588✔
823
    IRIns *store = IR(ref);
236✔
824
    switch (aa_xref(J, xr, fins, store)) {
236✔
825
    case ALIAS_NO:
826
      break;  /* Continue searching. */
827
    case ALIAS_MAY:
7✔
828
      if (store->op2 != val)  /* Conflict if the value is different. */
7✔
829
        goto doemit;
7✔
830
      break;  /* Otherwise continue searching. */
831
    case ALIAS_MUST:
55✔
832
      if (store->op2 == val)  /* Same value: drop the new store. */
55✔
833
        return DROPFOLD;
834
      /* Different value: try to eliminate the redundant store. */
835
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
50✔
836
        IRIns *ir;
7✔
837
        /* Check for any intervening guards or any XLOADs (no AA performed). */
838
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
13✔
839
          if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
11✔
840
            goto doemit;  /* No elimination possible. */
5✔
841
        /* Remove redundant store from chain and replace with NOP. */
842
        *refp = store->prev;
2✔
843
        store->o = IR_NOP;
2✔
844
        store->t.irt = IRT_NIL;
2✔
845
        store->op1 = store->op2 = 0;
2✔
846
        store->prev = 0;
2✔
847
        /* Now emit the new store instead. */
848
      }
849
      goto doemit;
45✔
850
    }
851
    ref = *(refp = &store->prev);
174✔
852
  }
853
doemit:
352✔
854
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
409✔
855
}
856

857
/* -- Forwarding of lj_tab_len -------------------------------------------- */
858

859
/* This is rather simplistic right now, but better than nothing. */
860
TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
108✔
861
{
862
  IRRef tab = fins->op1;  /* Table reference. */
108✔
863
  IRRef lim = tab;  /* Search limit. */
108✔
864
  IRRef ref;
108✔
865

866
  /* Any ASTORE is a conflict and limits the search. */
867
  if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
108✔
868

869
  /* Search for conflicting HSTORE with numeric key. */
870
  ref = J->chain[IR_HSTORE];
108✔
871
  while (ref > lim) {
114✔
872
    IRIns *store = IR(ref);
10✔
873
    IRIns *href = IR(store->op1);
10✔
874
    IRIns *key = IR(href->op2);
10✔
875
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
10✔
876
      lim = ref;  /* Conflicting store found, limits search for TLEN. */
877
      break;
878
    }
879
    ref = store->prev;
6✔
880
  }
881

882
  /* Search for aliasing table.clear. */
883
  if (!fwd_aa_tab_clear(J, lim, tab))
108✔
884
    return lj_ir_emit(J);
×
885

886
  /* Try to find a matching load. Below the conflicting store, if any. */
887
  return lj_opt_cselim(J, lim);
108✔
888
}
889

890
/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
891

892
/* Check whether the previous value for a table store is non-nil.
893
** This can be derived either from a previous store or from a previous
894
** load (because all loads from tables perform a type check).
895
**
896
** The result of the analysis can be used to avoid the metatable check
897
** and the guard against HREF returning niltv. Both of these are cheap,
898
** so let's not spend too much effort on the analysis.
899
**
900
** A result of 1 is exact: previous value CANNOT be nil.
901
** A result of 0 is inexact: previous value MAY be nil.
902
*/
903
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
2,838✔
904
{
905
  /* First check stores. */
906
  IRRef ref = J->chain[loadop+IRDELTA_L2S];
2,838✔
907
  while (ref > xref) {
3,082✔
908
    IRIns *store = IR(ref);
2,022✔
909
    if (store->op1 == xref) {  /* Same xREF. */
2,022✔
910
      /* A nil store MAY alias, but a non-nil store MUST alias. */
911
      return !irt_isnil(store->t);
1,778✔
912
    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
244✔
913
      IRRef skref = IR(store->op1)->op2;
10✔
914
      IRRef xkref = IR(xref)->op2;
10✔
915
      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
916
      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
10✔
917
        if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
10✔
918
          return 0;  /* A nil store with same const key or var key MAY alias. */
919
        /* Different const keys CANNOT alias. */
920
      }  /* Different key types CANNOT alias. */
921
    }  /* Other non-nil stores MAY alias. */
922
    ref = store->prev;
244✔
923
  }
924

925
  /* Check loads since nothing could be derived from stores. */
926
  ref = J->chain[loadop];
1,060✔
927
  while (ref > xref) {
1,250✔
928
    IRIns *load = IR(ref);
447✔
929
    if (load->op1 == xref) {  /* Same xREF. */
447✔
930
      /* A nil load MAY alias, but a non-nil load MUST alias. */
931
      return !irt_isnil(load->t);
257✔
932
    }  /* Other non-nil loads MAY alias. */
933
    ref = load->prev;
190✔
934
  }
935
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
936
}
937

938
/* ------------------------------------------------------------------------ */
939

940
#undef IR
941
#undef fins
942
#undef fleft
943
#undef fright
944

945
#endif