• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 8093903834

29 Feb 2024 09:24AM UTC coverage: 92.666% (+0.08%) from 92.591%
8093903834

push

github

Buristan
Fix C file generation in jit.bcsave.

Thanks to codicodi.

(cherry picked from commit 62903bacf)

LuaJIT allows exporting bytecode to a C file using the option `-b`, see
[1]. To support building the generated C file in C++ projects, the C
file uses the macro `__cplusplus` [2], but this macro was broken by the
commit a9dd47b7f ("Extend -b to generate
c/h/obj/o files with embedded bytecode."). With this breakage, the C/C++
compiler removes the definition of the array with the bytecode, and the
resulting object file misses the symbol with the bytecode.

The patch fixes the broken macro.

Note: the test test/lua-Harness-tests/411-luajit.t checks the presence
of the macro `__cplusplus` in the generated C file; however, it doesn't
catch this particular bug.

Sergey Bronnikov:
* added the description and the test for the problem

Part of tarantool/tarantool#9595

1. https://luajit.org/running.html
2. https://en.cppreference.com/w/cpp/preprocessor/replace

Reviewed-by: Sergey Kaplun <skaplun@tarantool.org>
Reviewed-by: Igor Munkin <imun@tarantool.org>
Signed-off-by: Sergey Kaplun <skaplun@tarantool.org>
(cherry picked from commit 593508cf4)

5660 of 6016 branches covered (94.08%)

Branch coverage included in aggregate %.

21607 of 23409 relevant lines covered (92.3%)

2816787.98 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

95.37
/src/lj_opt_mem.c
1
/*
2
** Memory access optimizations.
3
** AA: Alias Analysis using high-level semantic disambiguation.
4
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
5
** DSE: Dead-Store Elimination.
6
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
7
*/
8

9
#define lj_opt_mem_c
10
#define LUA_CORE
11

12
#include "lj_obj.h"
13

14
#if LJ_HASJIT
15

16
#include "lj_tab.h"
17
#include "lj_ir.h"
18
#include "lj_jit.h"
19
#include "lj_iropt.h"
20
#include "lj_ircall.h"
21
#include "lj_dispatch.h"
22

23
/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)                (&J->cur.ir[(ref)])  /* Ref -> IR instruction. */
#define fins                (&J->fold.ins)    /* Instruction being folded. */
#define fleft                (J->fold.left)   /* Left operand instruction. */
#define fright                (J->fold.right)  /* Right operand instruction. */

/*
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
*/

/* Return values from alias analysis. */
typedef enum {
  ALIAS_NO,        /* The two refs CANNOT alias (exact). */
  ALIAS_MAY,        /* The two refs MAY alias (inexact). */
  ALIAS_MUST        /* The two refs MUST alias (exact). */
} AliasRet;
40

41
/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */

/* Simplified escape analysis: check for intervening stores.
** Scans the IR instructions strictly between ir and stop. If the ref for
** ir appears as the stored VALUE (op2) of any table/upvalue/field store,
** the allocation may have escaped and could alias.
*/
static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
{
  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
  for (ir++; ir < stop; ir++)
    if (ir->op2 == ref &&
        (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
         ir->o == IR_USTORE || ir->o == IR_FSTORE))
      return ALIAS_MAY;  /* Reference was stored and might alias. */
  return ALIAS_NO;  /* Reference was not stored. */
}
54

55
/* Alias analysis for two different table references.
** Preconditions (asserted): ta != tb and both refs have table type.
** Fresh allocations (TNEW/TDUP) are distinct by construction; a single
** allocation only aliases another table if it escaped via a store
** (checked by aa_escape).
*/
static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
{
  IRIns *taba = IR(ta), *tabb = IR(tb);
  int newa, newb;
  lj_assertJ(ta != tb, "bad usage");
  lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
  /* Disambiguate new allocations. */
  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
  if (newa && newb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (newb) {  /* At least one allocation? */
    /* Swap so the allocation is on the left (escape scan runs taba..tabb). */
    IRIns *tmp = taba; taba = tabb; tabb = tmp;
  } else if (!newa) {
    return ALIAS_MAY;  /* Anything else: we just don't know. */
  }
  return aa_escape(J, taba, tabb);
}
74

75
/* Check whether there's no aliasing table.clear.
** Walks the IR_CALLS chain down to lim and returns 0 if any
** lj_tab_clear call might target table ta, else 1 (safe to FOLD/CSE).
*/
static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
{
  IRRef ref = J->chain[IR_CALLS];
  while (ref > lim) {
    IRIns *calls = IR(ref);
    if (calls->op2 == IRCALL_lj_tab_clear &&
        (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
      return 0;  /* Conflict. */
    ref = calls->prev;
  }
  return 1;  /* No conflict. Can safely FOLD/CSE. */
}
88

89
/* Check whether there's no aliasing NEWREF/table.clear for the left operand.
** Walks the IR_NEWREF chain down to lim; any NEWREF on the same or a
** possibly-aliasing table is a conflict. Otherwise defers to the
** table.clear check. Returns 0 on conflict, 1 if safe.
*/
int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
{
  IRRef ta = fins->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > lim) {
    IRIns *newref = IR(ref);
    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = newref->prev;
  }
  return fwd_aa_tab_clear(J, lim, ta);
}
102

103
/* Alias analysis for array and hash access using key-based disambiguation.
** refa/refb are xREF instructions (AREF/HREF/HREFK/NEWREF). Resolves
** KSLOT indirections, then disambiguates by key (constant vs. constant,
** index arithmetic for AREF, key type for hash refs) and finally by table.
*/
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRRef ka = refa->op2;
  IRRef kb = refb->op2;
  IRIns *keya, *keyb;
  IRRef ta, tb;
  if (refa == refb)
    return ALIAS_MUST;  /* Shortcut for same refs. */
  /* Strip KSLOT wrappers to get at the underlying key constants. */
  keya = IR(ka);
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
  keyb = IR(kb);
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
  /* HREFK/AREF go through an intermediate ref; its op1 holds the table. */
  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
  if (ka == kb) {
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
    if (ta == tb)
      return ALIAS_MUST;  /* Same key, same table. */
    else
      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
  }
  if (irref_isk(ka) && irref_isk(kb))
    return ALIAS_NO;  /* Different constant keys. */
  if (refa->o == IR_AREF) {
    /* Disambiguate array references based on index arithmetic. */
    int32_t ofsa = 0, ofsb = 0;
    IRRef basea = ka, baseb = kb;
    lj_assertJ(refb->o == IR_AREF, "expected AREF");
    /* Gather base and offset from t[base] or t[base+-ofs]. */
    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
      basea = keya->op1;
      ofsa = IR(keya->op2)->i;
      if (basea == kb && ofsa != 0)
        return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
    }
    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
      baseb = keyb->op1;
      ofsb = IR(keyb->op2)->i;
      if (ka == baseb && ofsb != 0)
        return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
    }
    if (basea == baseb && ofsa != ofsb)
      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
  } else {
    /* Disambiguate hash references based on the type of their keys. */
    lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
               (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
               "bad xREF IR op %d or %d", refa->o, refb->o);
    if (!irt_sametype(keya->t, keyb->t))
      return ALIAS_NO;  /* Different key types. */
  }
  if (ta == tb)
    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
  else
    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
}
160

161
/* Array and hash load forwarding.
** xref is the xREF (AREF/HREF/HREFK/...) the current load (fins) reads
** through. Tries, in order: store-to-load forwarding across the matching
** store chain, const-folding of loads from fresh TNEW/TDUP allocations,
** and plain load-to-load forwarding (CSE limited by conflicting stores).
** Returns a forwarded ref/constant or 0 on conflict/no match.
** Caveat: return value is not always a TRef (see top-of-file caveats).
*/
static TRef fwd_ahload(jit_State *J, IRRef xref)
{
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[fins->o+IRDELTA_L2S];  /* Matching store op for this load. */
  while (ref > xref) {
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store (yet): const-fold loads from allocations. */
  {
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
    IRRef tab = ir->op1;
    ir = IR(tab);
    if ((ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) &&
        fwd_aa_tab_clear(J, tab, tab)) {
      /* A NEWREF with a number key may end up pointing to the array part.
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
      ** Or a NEWREF may rehash the table and move unrelated number keys.
      ** For now simply consider this a conflict without forwarding anything.
      */
      if (xr->o == IR_AREF) {
        IRRef ref2 = J->chain[IR_NEWREF];
        while (ref2 > tab) {
          IRIns *newref = IR(ref2);
          if (irt_isnum(IR(newref->op2)->t))
            goto cselim;
          ref2 = newref->prev;
        }
      } else {
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        if (irt_isnum(key->t) && J->chain[IR_NEWREF] > tab)
          goto cselim;
      }
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
      ** But the above search for conflicting stores was limited by xref.
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
      ** is ok, too. A conflict does NOT limit the search for a matching load.
      */
      while (ref > tab) {
        IRIns *store = IR(ref);
        switch (aa_ahref(J, xr, IR(store->op1))) {
        case ALIAS_NO:   break;  /* Continue searching. */
        case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
        case ALIAS_MUST: return store->op2;  /* Store forwarding. */
        }
        ref = store->prev;
      }
      /* Simplified here: let loop_unroll() figure out any type instability. */
      if (ir->o == IR_TNEW) {
        return TREF_NIL;  /* Fresh table: unstored slot is nil. */
      } else {
        /* TDUP with constant key: look the value up in the template table. */
        TValue keyv;
        cTValue *tv;
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        lj_ir_kvalue(J->L, &keyv, key);
        tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
        if (tvispri(tv))
          return TREF_PRI(itype2irt(tv));
        else if (tvisnum(tv))
          return lj_ir_knum_u64(J, tv->u64);
        else if (tvisint(tv))
          return lj_ir_kint(J, intV(tv));
        else if (tvisgcv(tv))
          return lj_ir_kstr(J, strV(tv));
      }
      /* Otherwise: don't intern as a constant. */
    }
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[fins->o];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == xref)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return 0;  /* Conflict or no match. */
}
254

255
/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case.
** If the current ALOAD's index has the shape (base+k)+(-k), look for an
** existing AREF on the same array with index `base` and retry forwarding
** through it. Returns a forwarded ref or 0.
*/
static TRef fwd_aload_reassoc(jit_State *J)
{
  IRIns *irx = IR(fins->op1);
  IRIns *key = IR(irx->op2);
  if (key->o == IR_ADD && irref_isk(key->op2)) {
    IRIns *add2 = IR(key->op1);
    /* Constant offsets must cancel out exactly: (base+k) + (-k). */
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
        IR(key->op2)->i == -IR(add2->op2)->i) {
      IRRef ref = J->chain[IR_AREF];
      IRRef lim = add2->op1;
      if (irx->op1 > lim) lim = irx->op1;
      while (ref > lim) {
        IRIns *ir = IR(ref);
        if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
          return fwd_ahload(J, ref);  /* Forward through equivalent AREF. */
        ref = ir->prev;
      }
    }
  }
  return 0;
}
277

278
/* ALOAD forwarding.
** First tries direct forwarding, then the t[i-1] reassociation fallback.
** Emits the load unchanged if neither succeeds.
*/
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
{
  IRRef ref;
  if ((ref = fwd_ahload(J, fins->op1)) ||
      (ref = fwd_aload_reassoc(J)))
    return ref;
  return EMITFOLD;
}
287

288
/* HLOAD forwarding. */
289
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
19,102✔
290
{
291
  IRRef ref = fwd_ahload(J, fins->op1);
19,102✔
292
  if (ref)
19,102✔
293
    return ref;
294
  return EMITFOLD;
12,715✔
295
}
296

297
/* HREFK forwarding.
** Scans the NEWREF chain: a NEWREF on the same table with the same key
** (and no intervening table.clear) can be forwarded directly; any other
** possibly-aliasing NEWREF forces plain CSE. Without conflicts, an HREFK
** into a TDUP template can drop its guard, since the key slot is fixed.
*/
TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
{
  IRRef tab = fleft->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > tab) {
    IRIns *newref = IR(ref);
    if (tab == newref->op1) {
      if (fright->op1 == newref->op2 && fwd_aa_tab_clear(J, ref, tab))
        return ref;  /* Forward from NEWREF. */
      else
        goto docse;
    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
      goto docse;
    }
    ref = newref->prev;
  }
  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
  if (IR(tab)->o == IR_TDUP && fwd_aa_tab_clear(J, tab, tab))
    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
docse:
  return CSEFOLD;
}
320

321
/* Check whether HREF of TNEW/TDUP can be folded to niltv.
** Returns 1 if no store could have put a value into the referenced slot
** (so the hash lookup must yield nil), 0 on any potential conflict.
*/
int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
{
  IRRef lim = fins->op1;  /* Search limit. */
  IRRef ref;

  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
    ref = J->chain[IR_ASTORE];
    while (ref > lim) {
      /* ASTORE before a later NEWREF: its key may have been rehashed. */
      if (ref < J->chain[IR_NEWREF])
        return 0;  /* Conflict. */
      ref = IR(ref)->prev;
    }
  }

  /* Search for conflicting stores. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = store->prev;
  }

  return 1;  /* No conflict. Can fold to niltv. */
}
348

349
/* ASTORE/HSTORE elimination (dead-store elimination).
** Walks the matching store chain: drops the new store if an aliasing
** store already wrote the same value, or NOPs out an earlier store to
** the same location when no guard/CALLL intervenes. Otherwise emits.
*/
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[fins->o];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:        /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || ir->o == IR_CALLL)
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);  /* Keep chain link for possible unlink. */
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
391

392
/* -- ULOAD forwarding ---------------------------------------------------- */
393

394
/* The current alias analysis for upvalues is very simplistic. It only
395
** disambiguates between the unique upvalues of the same function.
396
** This is good enough for now, since most upvalues are read-only.
397
**
398
** A more precise analysis would be feasible with the help of the parser:
399
** generate a unique key for every upvalue, even across all prototypes.
400
** Lacking a realistic use-case, it's unclear whether this is beneficial.
401
*/
402
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
88✔
403
{
404
  if (refa->o != refb->o)
88✔
405
    return ALIAS_NO;  /* Different UREFx type. */
406
  if (refa->op1 == refb->op1) {  /* Same function. */
84✔
407
    if (refa->op2 == refb->op2)
73✔
408
      return ALIAS_MUST;  /* Same function, same upvalue idx. */
409
    else
410
      return ALIAS_NO;  /* Same function, different upvalue idx. */
41✔
411
  } else {  /* Different functions, check disambiguation hash values. */
412
    if (((refa->op2 ^ refb->op2) & 0xff))
11✔
413
      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
414
    else
415
      return ALIAS_MAY;  /* No conclusion can be drawn for same hash value. */
8✔
416
  }
417
}
418

419
/* ULOAD forwarding.
** Tries store-to-load forwarding across the USTORE chain, then
** load-to-load forwarding across the ULOAD chain (also matching
** structurally equal UREFx instructions, which covers non-CSEable
** UREFO). Emits the load if nothing can be forwarded.
*/
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
{
  IRRef uref = fins->op1;
  IRRef lim = REF_BASE;  /* Search limit. */
  IRIns *xr = IR(uref);
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_USTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */

  ref = J->chain[IR_ULOAD];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == uref ||
        (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
    ref = ir->prev;
  }
  return lj_ir_emit(J);
}
452

453
/* USTORE elimination (dead-store elimination for upvalue stores).
** Like lj_opt_dse_ahstore, but uses upvalue alias analysis. When an
** eliminated store is immediately followed by a matching write barrier
** (OBAR on the same xref), the barrier is removed from its chain too.
*/
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[IR_USTORE];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:        /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        if (ref+1 < J->cur.nins &&
            store[1].o == IR_OBAR && store[1].op1 == xref) {
          /* Walk the OBAR chain to find the link pointing at this OBAR. */
          IRRef1 *bp = &J->chain[IR_OBAR];
          IRIns *obar;
          for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
            bp = &obar->prev;
          /* Remove OBAR, too. */
          *bp = obar->prev;
          obar->o = IR_NOP;
          obar->t.irt = IRT_NIL;
          obar->op1 = obar->op2 = 0;
          obar->prev = 0;
        }
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
508

509
/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
510

511
/* Alias analysis for field access.
512
** Field loads are cheap and field stores are rare.
513
** Simple disambiguation based on field types is good enough.
514
*/
515
static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
50✔
516
{
517
  if (refa->op2 != refb->op2)
50✔
518
    return ALIAS_NO;  /* Different fields. */
519
  if (refa->op1 == refb->op1)
45✔
520
    return ALIAS_MUST;  /* Same field, same object. */
521
  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
14✔
522
    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
14✔
523
  else
524
    return ALIAS_MAY;  /* Same field, possibly different object. */
525
}
526

527
/* Only the loads for mutable fields end up here (see FOLD).
** Tries store-to-load forwarding across the FSTORE chain; const-folds
** the metatable field of fresh allocations to NULL; otherwise falls
** back to generic load CSE bounded by the conflicting store.
*/
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
{
  IRRef oref = fins->op1;  /* Object reference. */
  IRRef fid = fins->op2;  /* Field ID. */
  IRRef lim = oref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_FSTORE];
  while (ref > oref) {
    IRIns *store = IR(ref);
    switch (aa_fref(J, fins, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store: const-fold field loads from allocations. */
  if (fid == IRFL_TAB_META) {
    IRIns *ir = IR(oref);
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
      return lj_ir_knull(J, IRT_TAB);  /* Fresh tables have no metatable. */
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  return lj_opt_cselim(J, lim);
}
558

559
/* FSTORE elimination (dead-store elimination for field stores).
** Mirrors lj_opt_dse_ahstore, but additionally treats an intervening
** FLOAD of the same field ID as a conflict that blocks elimination.
*/
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
{
  IRRef fref = fins->op1;  /* FREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(fref);
  IRRef1 *refp = &J->chain[IR_FSTORE];
  IRRef ref = *refp;
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_fref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or conflicting loads. */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
601

602
/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
603

604
/* Find cdata allocation for a reference (if any). */
605
static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
208✔
606
{
607
  while (ir->o == IR_ADD) {
224✔
608
    if (!irref_isk(ir->op1)) {
16✔
609
      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
16✔
610
      if (ir1) return ir1;
16✔
611
    }
612
    if (irref_isk(ir->op2)) return NULL;
16✔
613
    ir = IR(ir->op2);  /* Flatten right-recursion. */
16✔
614
  }
615
  return ir->o == IR_CNEW ? ir : NULL;
208✔
616
}
617

618
/* Alias analysis for two cdata allocations.
** Finds the CNEW (if any) behind each ref. Two distinct allocations
** never alias; a single allocation only aliases if it escaped via a
** store between the allocation and the other ref (aa_escape).
*/
static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRIns *cnewa = aa_findcnew(J, refa);
  IRIns *cnewb = aa_findcnew(J, refb);
  if (cnewa == cnewb)
    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
  if (cnewa && cnewb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (cnewb) { cnewa = cnewb; refb = refa; }  /* Allocation on the left. */
  return aa_escape(J, cnewa, refb);
}
630

631
/* Alias analysis for XLOAD/XSTORE.
** refa is the address of the first access, xa/xb the access
** instructions (their types give size/kind). Disambiguates by
** base+offset arithmetic, constified pointers, strict-aliasing type
** rules, and finally cdata allocation analysis.
*/
static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
{
  ptrdiff_t ofsa = 0, ofsb = 0;
  IRIns *refb = IR(xb->op1);
  IRIns *basea = refa, *baseb = refb;
  if (refa == refb && irt_sametype(xa->t, xb->t))
    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
  /* Offset-based disambiguation. */
  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
    IRIns *irk = IR(refa->op2);
    basea = IR(refa->op1);
    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                            (ptrdiff_t)irk->i;
  }
  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
    IRIns *irk = IR(refb->op2);
    baseb = IR(refb->op1);
    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                            (ptrdiff_t)irk->i;
  }
  /* Treat constified pointers like base vs. base+offset. */
  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
    baseb = basea;
  }
  /* This implements (very) strict aliasing rules.
  ** Different types do NOT alias, except for differences in signedness.
  ** Type punning through unions is allowed (but forces a reload).
  */
  if (basea == baseb) {
    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
    if (ofsa == ofsb) {
      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
        return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
    }
    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
  }
  /* Different bases: distinct types don't alias, except signed/unsigned
  ** pairs of the same integer width (adjacent IRT_I8..IRT_U64 codes).
  */
  if (!irt_sametype(xa->t, xb->t) &&
      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
        ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
    return ALIAS_NO;
  /* NYI: structural disambiguation. */
  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
}
679

680
/* Return CSEd reference or 0. Caveat: swaps lower ref to the right!
** Searches the chain for op down to the higher operand ref for an
** existing instruction with exactly these (canonically ordered) operands.
*/
static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
{
  IRRef ref = J->chain[op];
  IRRef lim = op1;
  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }  /* Canonical order. */
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == op1 && ir->op2 == op2)
      return ref;
    ref = ir->prev;
  }
  return 0;
}
694

695
/* Reassociate index references.
** Decomposes an address of the shape (base + ((idx +- k) << shift)) + ofs
** and tries to rebuild an equivalent, already-CSEd index chain with the
** constant folded into the outer offset. Returns the reassociated index
** ref on success, 0 on failure (never emits new instructions except the
** offset constant).
*/
static IRRef reassoc_xref(jit_State *J, IRIns *ir)
{
  ptrdiff_t ofs = 0;
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
    IRIns *irk = IR(ir->op2);
    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                           (ptrdiff_t)irk->i;
    ir = IR(ir->op1);
  }
  if (ir->o == IR_ADD) {  /* Add of base + index. */
    /* Index ref > base ref for loop-carried dependences. Only check op1. */
    IRIns *ir2, *ir1 = IR(ir->op1);
    int32_t shift = 0;
    IRRef idxref;
    /* Determine index shifts. Don't bother with IR_MUL here. */
    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
      shift = IR(ir1->op2)->i;
    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
      shift = 1;  /* x+x == x<<1. */
    else
      ir1 = ir;
    ir2 = IR(ir1->op1);
    /* A non-reassociated add. Must be a loop-carried dependence. */
    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
    else
      return 0;
    idxref = ir2->op1;
    /* Try to CSE the reassociated chain. Give up if not found. */
    if (ir1 != ir &&
        !(idxref = reassoc_trycse(J, ir1->o, idxref,
                                  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
      return 0;
    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
      return 0;
    if (ofs != 0) {
      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
        return 0;
    }
    return idxref;  /* Success, found a reassociated index reference. Phew. */
  }
  return 0;  /* Failure. */
}
740

741
/* XLOAD forwarding.
** Tries to replace the pending XLOAD (in fins, the fold engine's current
** instruction) by either the stored value of a must-aliasing earlier
** XSTORE (store-to-load forwarding) or an identical earlier XLOAD (CSE).
** Falls back to EMITFOLD if the load must actually be emitted.
*/
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  if ((fins->op2 & IRXLOAD_READONLY))
    goto cselim;  /* Read-only: no store can conflict, go straight to CSE. */
  if ((fins->op2 & IRXLOAD_VOLATILE))
    goto doemit;  /* Volatile loads must never be forwarded or CSEd. */

  /* Search for conflicting stores. */
  ref = J->chain[IR_XSTORE];
retry:
  /* C calls and barriers invalidate all memory: limit the search there. */
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST:
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
	IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
	if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
	  st = dt | IRCONV_SEXT;
	  dt = IRT_INT;
	} else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
	  st = dt;
	  dt = IRT_INT;
	}
	/* Morph fins into a CONV of the stored value and refold. */
	fins->ot = IRT(IR_CONV, dt);
	fins->op1 = store->op2;
	fins->op2 = (dt<<5)|st;
	return RETRYFOLD;
      }
      return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_XLOAD];
  while (ref > lim) {
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
      return ref;
    ref = IR(ref)->prev;
  }

  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
    ref = J->chain[IR_XSTORE];
    while (ref > lim)  /* Skip stores that have already been checked. */
      ref = IR(ref)->prev;
    lim = xref;
    xr = IR(xref);
    goto retry;  /* Retry with the reassociated reference. */
  }
doemit:
  return EMITFOLD;
}
808

809
/* XSTORE elimination.
** Drops the pending XSTORE (in fins) if an earlier must-aliasing store
** already wrote the same value, or eliminates the EARLIER store (turning
** it into a NOP) when the new store fully shadows it and no guard or
** XLOAD intervenes. Otherwise the new store is emitted as usual.
*/
TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRRef1 *refp = &J->chain[IR_XSTORE];  /* Link to patch when unlinking. */
  IRRef ref = *refp;
  /* C calls, barriers and string allocations limit the search. */
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
  while (ref > lim) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
	goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
	return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
	IRIns *ir;
	/* Check for any intervening guards or any XLOADs (no AA performed). */
	for (ir = IR(J->cur.nins-1); ir > store; ir--)
	  if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
	    goto doemit;  /* No elimination possible. */
	/* Remove redundant store from chain and replace with NOP. */
	*refp = store->prev;
	store->o = IR_NOP;
	store->t.irt = IRT_NIL;
	store->op1 = store->op2 = 0;
	store->prev = 0;
	/* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);  /* Keep the chain link for unlinking. */
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
855

856
/* -- Forwarding of lj_tab_len -------------------------------------------- */

/* This is rather simplistic right now, but better than nothing.
** Forward a previous TLEN result for the same table, unless a potentially
** conflicting array/hash store or an aliasing table.clear intervened.
*/
TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
{
  IRRef tab = fins->op1;  /* Table reference. */
  IRRef lim = tab;  /* Search limit. */
  IRRef ref;

  /* Any ASTORE is a conflict and limits the search. */
  if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];

  /* Search for conflicting HSTORE with numeric key. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    IRIns *href = IR(store->op1);
    IRIns *key = IR(href->op2);
    /* A KSLOT key carries the real key in op1. */
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
      lim = ref;  /* Conflicting store found, limits search for TLEN. */
      break;
    }
    ref = store->prev;
  }

  /* Search for aliasing table.clear. */
  if (!fwd_aa_tab_clear(J, lim, tab))
    return lj_ir_emit(J);  /* table.clear may alias: emit the TLEN. */

  /* Try to find a matching load. Below the conflicting store, if any. */
  return lj_opt_cselim(J, lim);
}
888

889
/* -- ASTORE/HSTORE previous type analysis -------------------------------- */

/* Check whether the previous value for a table store is non-nil.
** This can be derived either from a previous store or from a previous
** load (because all loads from tables perform a type check).
**
** The result of the analysis can be used to avoid the metatable check
** and the guard against HREF returning niltv. Both of these are cheap,
** so let's not spend too much effort on the analysis.
**
** A result of 1 is exact: previous value CANNOT be nil.
** A result of 0 is inexact: previous value MAY be nil.
**
** loadop is the load opcode (e.g. IR_ALOAD); the matching store chain is
** reached via loadop+IRDELTA_L2S. xref is the address reference (xREF).
*/
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
{
  /* First check stores. */
  IRRef ref = J->chain[loadop+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    if (store->op1 == xref) {  /* Same xREF. */
      /* A nil store MAY alias, but a non-nil store MUST alias. */
      return !irt_isnil(store->t);
    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
      IRRef skref = IR(store->op1)->op2;  /* Store key. */
      IRRef xkref = IR(xref)->op2;  /* Our key. */
      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
	if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
	  return 0;  /* A nil store with same const key or var key MAY alias. */
	/* Different const keys CANNOT alias. */
      }  /* Different key types CANNOT alias. */
    }  /* Other non-nil stores MAY alias. */
    ref = store->prev;
  }

  /* Check loads since nothing could be derived from stores. */
  ref = J->chain[loadop];
  while (ref > xref) {
    IRIns *load = IR(ref);
    if (load->op1 == xref) {  /* Same xREF. */
      /* A nil load MAY alias, but a non-nil load MUST alias. */
      return !irt_isnil(load->t);
    }  /* Other non-nil loads MAY alias. */
    ref = load->prev;
  }
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
}
936

937
/* ------------------------------------------------------------------------ */
938

939
#undef IR
940
#undef fins
941
#undef fleft
942
#undef fright
943

944
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc