• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 6482677868

11 Oct 2023 12:31PM UTC coverage: 88.235% (-0.06%) from 88.296%
6482677868

push

github

Buristan
Fix base register coalescing in side trace.

Thanks to Sergey Kaplun, NiLuJe and Peter Cawley.

(cherry-picked from commit aa2db7ebd)

The previous patch fixed just part of the problem with the register
coalescing. For example, the parent base register may be used inside the
parent or child register sets when it shouldn't. This leads to incorrect
register allocations, which may lead to crashes or undefined behaviour.
This patch fixes it by excluding the parent base register from both
register sets.

The test case for this patch doesn't fail before the commit since it
requires specific register allocation, which is hard to construct and
very fragile. Because this branch is not perfectly in sync with the upstream
repository, the test passes even before the patch is applied.
It should become correct in future patches.

Resolves tarantool/tarantool#8767
Part of tarantool/tarantool#9145

Sergey Kaplun:
* added the description and the test for the problem

5342 of 5974 branches covered (0.0%)

Branch coverage included in aggregate %.

5 of 5 new or added lines in 1 file covered. (100.0%)

20487 of 23299 relevant lines covered (87.93%)

2753402.99 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

95.91
/src/lj_opt_mem.c
1
/*
2
** Memory access optimizations.
3
** AA: Alias Analysis using high-level semantic disambiguation.
4
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
5
** DSE: Dead-Store Elimination.
6
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
7
*/
8

9
#define lj_opt_mem_c
10
#define LUA_CORE
11

12
#include "lj_obj.h"
13

14
#if LJ_HASJIT
15

16
#include "lj_tab.h"
17
#include "lj_ir.h"
18
#include "lj_jit.h"
19
#include "lj_iropt.h"
20
#include "lj_ircall.h"
21
#include "lj_dispatch.h"
22

23
/* Some local macros to save typing. Undef'd at the end. */
24
#define IR(ref)                (&J->cur.ir[(ref)])
25
#define fins                (&J->fold.ins)
26
#define fleft                (J->fold.left)
27
#define fright                (J->fold.right)
28

29
/*
30
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
31
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
32
*/
33

34
/* Return values from alias analysis. */
35
typedef enum {
36
  ALIAS_NO,        /* The two refs CANNOT alias (exact). */
37
  ALIAS_MAY,        /* The two refs MAY alias (inexact). */
38
  ALIAS_MUST        /* The two refs MUST alias (exact). */
39
} AliasRet;
40

41
/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
42

43
/* Simplified escape analysis: check for intervening stores.
** Scans the IR instructions strictly between ir and stop for any
** xSTORE whose *value* operand is ir itself, i.e. the reference was
** stored somewhere else and may now be reachable through an alias.
** Returns ALIAS_MAY if such a store is found, otherwise ALIAS_NO.
*/
static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
{
  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
  for (ir++; ir < stop; ir++)
    if (ir->op2 == ref &&
        (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
         ir->o == IR_USTORE || ir->o == IR_FSTORE))
      return ALIAS_MAY;  /* Reference was stored and might alias. */
  return ALIAS_NO;  /* Reference was not stored. */
}
54

55
/* Alias analysis for two different table references.
** ta and tb must be distinct refs of table type. New allocations
** (TNEW/TDUP) are exact: two allocations never alias, and an
** allocation only aliases an older ref if it escaped via a store
** in between (checked by aa_escape).
*/
static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
{
  IRIns *taba = IR(ta), *tabb = IR(tb);
  int newa, newb;
  lj_assertJ(ta != tb, "bad usage");
  lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
  /* Disambiguate new allocations. */
  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
  if (newa && newb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (newb) {  /* At least one allocation? */
    /* Ensure the allocation is in taba, so aa_escape scans forward from it. */
    IRIns *tmp = taba; taba = tabb; tabb = tmp;
  } else if (!newa) {
    return ALIAS_MAY;  /* Anything else: we just don't know. */
  }
  return aa_escape(J, taba, tabb);
}
74

75
/* Alias analysis for array and hash access using key-based disambiguation.
** refa/refb are xREF instructions (AREF/HREF/HREFK/NEWREF). First tries
** key-based disambiguation (same key, constant keys, index arithmetic for
** arrays, key types for hashes), then falls back to table disambiguation.
*/
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRRef ka = refa->op2;
  IRRef kb = refb->op2;
  IRIns *keya, *keyb;
  IRRef ta, tb;
  if (refa == refb)
    return ALIAS_MUST;  /* Shortcut for same refs. */
  /* Resolve KSLOT indirection to the underlying key constant. */
  keya = IR(ka);
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
  keyb = IR(kb);
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
  /* HREFK/AREF point at FREF/FLOAD-style parents; fetch the table ref. */
  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
  if (ka == kb) {
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
    if (ta == tb)
      return ALIAS_MUST;  /* Same key, same table. */
    else
      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
  }
  if (irref_isk(ka) && irref_isk(kb))
    return ALIAS_NO;  /* Different constant keys. */
  if (refa->o == IR_AREF) {
    /* Disambiguate array references based on index arithmetic. */
    int32_t ofsa = 0, ofsb = 0;
    IRRef basea = ka, baseb = kb;
    lj_assertJ(refb->o == IR_AREF, "expected AREF");
    /* Gather base and offset from t[base] or t[base+-ofs]. */
    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
      basea = keya->op1;
      ofsa = IR(keya->op2)->i;
      if (basea == kb && ofsa != 0)
        return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
    }
    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
      baseb = keyb->op1;
      ofsb = IR(keyb->op2)->i;
      if (ka == baseb && ofsb != 0)
        return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
    }
    if (basea == baseb && ofsa != ofsb)
      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
  } else {
    /* Disambiguate hash references based on the type of their keys. */
    lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
               (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
               "bad xREF IR op %d or %d", refa->o, refb->o);
    if (!irt_sametype(keya->t, keyb->t))
      return ALIAS_NO;  /* Different key types. */
  }
  if (ta == tb)
    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
  else
    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
}
132

133
/* Array and hash load forwarding.
** xref is the AREF/HREF/HREFK ref of the pending load (fins).
** Tries, in order: store forwarding across the matching store chain,
** const-folding of loads from fresh TNEW/TDUP allocations, and finally
** plain load forwarding (CSE) below any conflicting store.
** Returns a forwarded TRef or 0 if no forwarding is possible.
*/
static TRef fwd_ahload(jit_State *J, IRRef xref)
{
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[fins->o+IRDELTA_L2S];  /* Matching store chain for this load op. */
  while (ref > xref) {
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store (yet): const-fold loads from allocations. */
  {
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
    IRRef tab = ir->op1;
    ir = IR(tab);
    if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
      /* A NEWREF with a number key may end up pointing to the array part.
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
      ** Or a NEWREF may rehash the table and move unrelated number keys.
      ** For now simply consider this a conflict without forwarding anything.
      */
      if (xr->o == IR_AREF) {
        IRRef ref2 = J->chain[IR_NEWREF];
        while (ref2 > tab) {
          IRIns *newref = IR(ref2);
          if (irt_isnum(IR(newref->op2)->t))
            goto cselim;
          ref2 = newref->prev;
        }
      } else {
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        if (irt_isnum(key->t) && J->chain[IR_NEWREF] > tab)
          goto cselim;
      }
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
      ** But the above search for conflicting stores was limited by xref.
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
      ** is ok, too. A conflict does NOT limit the search for a matching load.
      */
      while (ref > tab) {
        IRIns *store = IR(ref);
        switch (aa_ahref(J, xr, IR(store->op1))) {
        case ALIAS_NO:   break;  /* Continue searching. */
        case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
        case ALIAS_MUST: return store->op2;  /* Store forwarding. */
        }
        ref = store->prev;
      }
      if (ir->o == IR_TNEW && !irt_isnil(fins->t))
        return 0;  /* Type instability in loop-carried dependency. */
      if (irt_ispri(fins->t)) {
        return TREF_PRI(irt_type(fins->t));
      } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
                 irt_isstr(fins->t)) {
        /* Load from a TDUP template table: look the constant key up in the
        ** template and intern the resulting value as an IR constant.
        */
        TValue keyv;
        cTValue *tv;
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        lj_ir_kvalue(J->L, &keyv, key);
        tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
        if (itype2irt(tv) != irt_type(fins->t))
          return 0;  /* Type instability in loop-carried dependency. */
        if (irt_isnum(fins->t))
          return lj_ir_knum_u64(J, tv->u64);
        else if (LJ_DUALNUM && irt_isint(fins->t))
          return lj_ir_kint(J, intV(tv));
        else
          return lj_ir_kstr(J, strV(tv));
      }
      /* Otherwise: don't intern as a constant. */
    }
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[fins->o];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == xref)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return 0;  /* Conflict or no match. */
}
227

228
/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case.
** Detects the pattern key = (idx + k) + (-k), i.e. an index that was
** incremented by k and then offset back by -k, and tries to find an
** existing AREF for the un-reassociated index to forward from.
*/
static TRef fwd_aload_reassoc(jit_State *J)
{
  IRIns *irx = IR(fins->op1);
  IRIns *key = IR(irx->op2);
  if (key->o == IR_ADD && irref_isk(key->op2)) {
    IRIns *add2 = IR(key->op1);
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
        IR(key->op2)->i == -IR(add2->op2)->i) {  /* Offsets cancel out. */
      IRRef ref = J->chain[IR_AREF];
      IRRef lim = add2->op1;
      if (irx->op1 > lim) lim = irx->op1;
      while (ref > lim) {
        IRIns *ir = IR(ref);
        if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
          return fwd_ahload(J, ref);  /* Found equivalent AREF: forward. */
        ref = ir->prev;
      }
    }
  }
  return 0;
}
250

251
/* ALOAD forwarding.
** Try direct forwarding first, then the reassociated t[i-1] variant.
** Emits the load if neither succeeds.
*/
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
{
  IRRef ref;
  if ((ref = fwd_ahload(J, fins->op1)) ||
      (ref = fwd_aload_reassoc(J)))
    return ref;
  return EMITFOLD;
}
260

261
/* HLOAD forwarding.
** Forward from a prior store/load if possible, otherwise emit the load.
*/
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
{
  IRRef ref = fwd_ahload(J, fins->op1);
  if (ref)
    return ref;
  return EMITFOLD;
}
269

270
/* HREFK forwarding.
** Scans the NEWREF chain for a NEWREF on the same table: forwards from it
** if it created exactly this key, otherwise falls back to CSE. If no
** conflicting NEWREF exists and the table is a TDUP template, the key slot
** position is known to be stable, so the HREFK guard can be dropped.
*/
TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
{
  IRRef tab = fleft->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > tab) {
    IRIns *newref = IR(ref);
    if (tab == newref->op1) {
      if (fright->op1 == newref->op2)
        return ref;  /* Forward from NEWREF. */
      else
        goto docse;
    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
      goto docse;
    }
    ref = newref->prev;
  }
  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
  if (IR(tab)->o == IR_TDUP)
    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
docse:
  return CSEFOLD;
}
293

294
/* Check whether HREF of TNEW/TDUP can be folded to niltv.
** Returns 1 when no intervening ASTORE/HSTORE can have created the key,
** so the HREF is known to miss and may be folded to the nil sentinel.
** Returns 0 on any possible conflict.
*/
int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
{
  IRRef lim = fins->op1;  /* Search limit. */
  IRRef ref;

  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
    ref = J->chain[IR_ASTORE];
    while (ref > lim) {
      if (ref < J->chain[IR_NEWREF])
        return 0;  /* Conflict. */
      ref = IR(ref)->prev;
    }
  }

  /* Search for conflicting stores. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = store->prev;
  }

  return 1;  /* No conflict. Can fold to niltv. */
}
321

322
/* Check whether there's no aliasing table.clear.
** Walks the CALLS chain above lim looking for an lj_tab_clear call whose
** argument may alias table ta. Returns 1 if none found (safe to FOLD/CSE).
*/
static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
{
  IRRef ref = J->chain[IR_CALLS];
  while (ref > lim) {
    IRIns *calls = IR(ref);
    if (calls->op2 == IRCALL_lj_tab_clear &&
        (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
      return 0;  /* Conflict. */
    ref = calls->prev;
  }
  return 1;  /* No conflict. Can safely FOLD/CSE. */
}
335

336
/* Check whether there's no aliasing NEWREF/table.clear for the left operand.
** Used to validate forwarding of table-pointer-dependent results above lim.
** Returns 1 if neither a conflicting NEWREF nor a table.clear intervenes.
*/
int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
{
  IRRef ta = fins->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > lim) {
    IRIns *newref = IR(ref);
    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = newref->prev;
  }
  return fwd_aa_tab_clear(J, lim, ta);
}
349

350
/* ASTORE/HSTORE elimination (dead-store elimination).
** Drops the new store if it stores the same value to the same location,
** or NOPs out a prior store to the same location when no guard or CALLL
** could observe the old value in between. Otherwise emits the store.
*/
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[fins->o];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:        /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || ir->o == IR_CALLL)
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
392

393
/* -- ULOAD forwarding ---------------------------------------------------- */
394

395
/* The current alias analysis for upvalues is very simplistic. It only
** disambiguates between the unique upvalues of the same function.
** This is good enough for now, since most upvalues are read-only.
**
** A more precise analysis would be feasible with the help of the parser:
** generate a unique key for every upvalue, even across all prototypes.
** Lacking a realistic use-case, it's unclear whether this is beneficial.
**
** refa/refb are UREFO/UREFC instructions; op1 is the function, op2 holds
** the upvalue index plus a disambiguation hash in the low byte.
*/
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
{
  if (refa->o != refb->o)
    return ALIAS_NO;  /* Different UREFx type. */
  if (refa->op1 == refb->op1) {  /* Same function. */
    if (refa->op2 == refb->op2)
      return ALIAS_MUST;  /* Same function, same upvalue idx. */
    else
      return ALIAS_NO;  /* Same function, different upvalue idx. */
  } else {  /* Different functions, check disambiguation hash values. */
    if (((refa->op2 ^ refb->op2) & 0xff))
      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
    else
      return ALIAS_MAY;  /* No conclusion can be drawn for same hash value. */
  }
}
419

420
/* ULOAD forwarding.
** Forward the upvalue load from a prior USTORE (store forwarding) or from
** an identical/equal earlier ULOAD (load forwarding), limited by the most
** recent possibly-aliasing store. Emits the load otherwise.
*/
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
{
  IRRef uref = fins->op1;
  IRRef lim = REF_BASE;  /* Search limit. */
  IRIns *xr = IR(uref);
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_USTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */

  ref = J->chain[IR_ULOAD];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == uref ||
        (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
    ref = ir->prev;
  }
  return lj_ir_emit(J);
}
453

454
/* USTORE elimination. */
455
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
60✔
456
{
457
  IRRef xref = fins->op1;  /* xREF reference. */
60✔
458
  IRRef val = fins->op2;  /* Stored value reference. */
60✔
459
  IRIns *xr = IR(xref);
60✔
460
  IRRef1 *refp = &J->chain[IR_USTORE];
60✔
461
  IRRef ref = *refp;
60✔
462
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
60✔
463
    IRIns *store = IR(ref);
15✔
464
    switch (aa_uref(xr, IR(store->op1))) {
30✔
465
    case ALIAS_NO:
466
      break;  /* Continue searching. */
467
    case ALIAS_MAY:        /* Store to MAYBE the same location. */
2✔
468
      if (store->op2 != val)  /* Conflict if the value is different. */
2✔
469
        goto doemit;
2✔
470
      break;  /* Otherwise continue searching. */
471
    case ALIAS_MUST:        /* Store to the same location. */
13✔
472
      if (store->op2 == val)  /* Same value: drop the new store. */
13✔
473
        return DROPFOLD;
474
      /* Different value: try to eliminate the redundant store. */
475
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
12✔
476
        IRIns *ir;
7✔
477
        /* Check for any intervening guards (includes conflicting loads). */
478
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
13✔
479
          if (irt_isguard(ir->t))
8✔
480
            goto doemit;  /* No elimination possible. */
2✔
481
        /* Remove redundant store from chain and replace with NOP. */
482
        *refp = store->prev;
5✔
483
        store->o = IR_NOP;
5✔
484
        store->t.irt = IRT_NIL;
5✔
485
        store->op1 = store->op2 = 0;
5✔
486
        store->prev = 0;
5✔
487
        if (ref+1 < J->cur.nins &&
5✔
488
            store[1].o == IR_OBAR && store[1].op1 == xref) {
5✔
489
          IRRef1 *bp = &J->chain[IR_OBAR];
×
490
          IRIns *obar;
×
491
          for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
×
492
            bp = &obar->prev;
×
493
          /* Remove OBAR, too. */
494
          *bp = obar->prev;
×
495
          obar->o = IR_NOP;
×
496
          obar->t.irt = IRT_NIL;
×
497
          obar->op1 = obar->op2 = 0;
×
498
          obar->prev = 0;
×
499
        }
500
        /* Now emit the new store instead. */
501
      }
502
      goto doemit;
10✔
503
    }
504
    ref = *(refp = &store->prev);
×
505
  }
506
doemit:
45✔
507
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
59✔
508
}
509

510
/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
511

512
/* Alias analysis for field access.
** Field loads are cheap and field stores are rare.
** Simple disambiguation based on field types is good enough.
** refa/refb are FREF-style accesses; op1 is the object, op2 the field ID.
*/
static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
{
  if (refa->op2 != refb->op2)
    return ALIAS_NO;  /* Different fields. */
  if (refa->op1 == refb->op1)
    return ALIAS_MUST;  /* Same field, same object. */
  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
  else
    return ALIAS_MAY;  /* Same field, possibly different object. */
}
527

528
/* Only the loads for mutable fields end up here (see FOLD).
** Forward from a prior FSTORE, const-fold the metatable field of fresh
** allocations to NULL, or fall back to CSE below a conflicting store.
*/
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
{
  IRRef oref = fins->op1;  /* Object reference. */
  IRRef fid = fins->op2;  /* Field ID. */
  IRRef lim = oref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_FSTORE];
  while (ref > oref) {
    IRIns *store = IR(ref);
    switch (aa_fref(J, fins, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store: const-fold field loads from allocations. */
  if (fid == IRFL_TAB_META) {
    IRIns *ir = IR(oref);
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
      return lj_ir_knull(J, IRT_TAB);  /* Fresh tables have no metatable. */
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  return lj_opt_cselim(J, lim);
}
559

560
/* FSTORE elimination (dead-store elimination for field stores).
** Same scheme as lj_opt_dse_ahstore; a conflicting load here is an
** FLOAD of the same field ID.
*/
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
{
  IRRef fref = fins->op1;  /* FREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(fref);
  IRRef1 *refp = &J->chain[IR_FSTORE];
  IRRef ref = *refp;
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_fref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or conflicting loads. */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
602

603
/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
604

605
/* Find cdata allocation for a reference (if any).
** Follows ADD chains (pointer arithmetic) down to a possible CNEW.
** Returns the CNEW instruction or NULL if the ref does not derive
** from a visible cdata allocation.
*/
static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
{
  while (ir->o == IR_ADD) {
    if (!irref_isk(ir->op1)) {
      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
      if (ir1) return ir1;
    }
    if (irref_isk(ir->op2)) return NULL;
    ir = IR(ir->op2);  /* Flatten right-recursion. */
  }
  return ir->o == IR_CNEW ? ir : NULL;
}
618

619
/* Alias analysis for two cdata allocations.
** Distinct CNEW allocations never alias; a single allocation aliases an
** older ref only if it escaped via a store in between (aa_escape).
*/
static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRIns *cnewa = aa_findcnew(J, refa);
  IRIns *cnewb = aa_findcnew(J, refb);
  if (cnewa == cnewb)
    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
  if (cnewa && cnewb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (cnewb) { cnewa = cnewb; refb = refa; }  /* Put the allocation in cnewa. */
  return aa_escape(J, cnewa, refb);
}
631

632
/* Alias analysis for XLOAD/XSTORE.
** refa is the address of access xa; xb is the other access (its address is
** xb->op1). Disambiguates via base+offset arithmetic, constified pointers,
** strict-aliasing type rules and, finally, cdata allocation analysis.
*/
static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
{
  ptrdiff_t ofsa = 0, ofsb = 0;
  IRIns *refb = IR(xb->op1);
  IRIns *basea = refa, *baseb = refb;
  if (refa == refb && irt_sametype(xa->t, xb->t))
    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
  /* Offset-based disambiguation. */
  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
    IRIns *irk = IR(refa->op2);
    basea = IR(refa->op1);
    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                            (ptrdiff_t)irk->i;
  }
  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
    IRIns *irk = IR(refb->op2);
    baseb = IR(refb->op1);
    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                            (ptrdiff_t)irk->i;
  }
  /* Treat constified pointers like base vs. base+offset. */
  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
    baseb = basea;
  }
  /* This implements (very) strict aliasing rules.
  ** Different types do NOT alias, except for differences in signedness.
  ** Type punning through unions is allowed (but forces a reload).
  */
  if (basea == baseb) {
    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
    if (ofsa == ofsb) {
      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
        return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
    }
    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
  }
  if (!irt_sametype(xa->t, xb->t) &&
      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
        ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
    return ALIAS_NO;
  /* NYI: structural disambiguation. */
  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
}
680

681
/* Return CSEd reference or 0. Caveat: swaps lower ref to the right!
** Looks up an existing instruction (op op1 op2) in the CSE chain for op,
** after canonicalizing the operand order (higher ref first).
*/
static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
{
  IRRef ref = J->chain[op];
  IRRef lim = op1;
  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == op1 && ir->op2 == op2)
      return ref;
    ref = ir->prev;
  }
  return 0;
}
695

696
/* Reassociate index references.
** Rewrites base+((idx+k)<<shift)+ofs style address expressions so that
** the constant part of a loop-carried index increment is folded into the
** offset, then tries to CSE every step of the reassociated chain.
** Returns the reassociated index reference or 0 on failure.
*/
static IRRef reassoc_xref(jit_State *J, IRIns *ir)
{
  ptrdiff_t ofs = 0;
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
    IRIns *irk = IR(ir->op2);
    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
                                           (ptrdiff_t)irk->i;
    ir = IR(ir->op1);
  }
  if (ir->o == IR_ADD) {  /* Add of base + index. */
    /* Index ref > base ref for loop-carried dependences. Only check op1. */
    IRIns *ir2, *ir1 = IR(ir->op1);
    int32_t shift = 0;
    IRRef idxref;
    /* Determine index shifts. Don't bother with IR_MUL here. */
    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
      shift = IR(ir1->op2)->i;
    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
      shift = 1;  /* x+x is x<<1. */
    else
      ir1 = ir;
    ir2 = IR(ir1->op1);
    /* A non-reassociated add. Must be a loop-carried dependence. */
    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
    else
      return 0;
    idxref = ir2->op1;
    /* Try to CSE the reassociated chain. Give up if not found. */
    if (ir1 != ir &&
        !(idxref = reassoc_trycse(J, ir1->o, idxref,
                                  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
      return 0;
    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
      return 0;
    if (ofs != 0) {
      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
        return 0;
    }
    return idxref;  /* Success, found a reassociated index reference. Phew. */
  }
  return 0;  /* Failure. */
}
741

742
/* XLOAD forwarding.
**
** Try to satisfy the pending XLOAD (in fins) without emitting it:
** - forward the stored value of a must-aliasing XSTORE, inserting a CONV
**   when the stored type doesn't match the loaded type,
** - or CSE against an earlier XLOAD of the same address and type.
** Aliasing calls (CALLXS), memory barriers (XBAR) and may-aliasing stores
** limit how far back the search may go. As a last resort the address is
** reassociated across PHIs to catch the a[i-1] forwarding case.
** Returns a forwarded ref, RETRYFOLD (fins rewritten to a CONV), or
** EMITFOLD when the load must be emitted.
*/
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
{
  IRRef xref = fins->op1;   /* Address reference of the load. */
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Read-only loads can't conflict with stores: go straight to CSE. */
  if ((fins->op2 & IRXLOAD_READONLY))
    goto cselim;
  /* Volatile loads must never be forwarded or eliminated. */
  if ((fins->op2 & IRXLOAD_VOLATILE))
    goto doemit;

  /* Search for conflicting stores. */
  ref = J->chain[IR_XSTORE];
retry:
  /* Calls and barriers clobber memory: don't search past them. */
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST:
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
        IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
        if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
          st = dt | IRCONV_SEXT;
          dt = IRT_INT;
        } else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
          st = dt;
          dt = IRT_INT;
        }
        /* Rewrite fins in place into a CONV and ask the fold engine to
        ** process the rewritten instruction.
        */
        fins->ot = IRT(IR_CONV, dt);
        fins->op1 = store->op2;
        fins->op2 = (dt<<5)|st;  /* CONV mode: dest type | source type. */
        return RETRYFOLD;
      }
      return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_XLOAD];
  while (ref > lim) {
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
      return ref;
    ref = IR(ref)->prev;
  }

  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
  /* xref == fins->op1 ensures this is attempted only once (not on retry). */
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
    ref = J->chain[IR_XSTORE];
    while (ref > lim)  /* Skip stores that have already been checked. */
      ref = IR(ref)->prev;
    lim = xref;
    xr = IR(xref);
    goto retry;  /* Retry with the reassociated reference. */
  }
doemit:
  return EMITFOLD;
}
809

810
/* XSTORE elimination (dead-store elimination).
**
** Drop the pending XSTORE (in fins) when a must-aliasing earlier store
** already stored the same value. When the earlier store wrote a different
** value, try to eliminate it instead: this is only done when no guard and
** no XLOAD occurs in between (no alias analysis is attempted for them) and
** the LOOP marker is not crossed. The eliminated store is unlinked from
** its chain and overwritten with a NOP. Returns DROPFOLD or EMITFOLD.
*/
TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
{
  IRRef xref = fins->op1;   /* Address reference of the store. */
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef val = fins->op2;  /* Stored value reference. */
  /* refp tracks the chain link pointing at ref, for in-place unlinking. */
  IRRef1 *refp = &J->chain[IR_XSTORE];
  IRRef ref = *refp;
  /* Calls, barriers and string buffer allocations clobber memory. */
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
  while (ref > lim) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or any XLOADs (no AA performed). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);  /* Advance and remember the link. */
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
856

857
/* -- Forwarding of lj_tab_len -------------------------------------------- */
858

859
/* This is rather simplistic right now, but better than nothing. */
860
TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J)
95✔
861
{
862
  IRRef tab = fins->op1;  /* Table reference. */
95✔
863
  IRRef lim = tab;  /* Search limit. */
95✔
864
  IRRef ref;
95✔
865

866
  /* Any ASTORE is a conflict and limits the search. */
867
  if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];
95✔
868

869
  /* Search for conflicting HSTORE with numeric key. */
870
  ref = J->chain[IR_HSTORE];
95✔
871
  while (ref > lim) {
101✔
872
    IRIns *store = IR(ref);
6✔
873
    IRIns *href = IR(store->op1);
6✔
874
    IRIns *key = IR(href->op2);
6✔
875
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
6✔
876
      lim = ref;  /* Conflicting store found, limits search for TLEN. */
877
      break;
878
    }
879
    ref = store->prev;
6✔
880
  }
881

882
  /* Search for aliasing table.clear. */
883
  if (!fwd_aa_tab_clear(J, lim, tab))
95✔
884
    return lj_ir_emit(J);
×
885

886
  /* Try to find a matching load. Below the conflicting store, if any. */
887
  return lj_opt_cselim(J, lim);
95✔
888
}
889

890
/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
891

892
/* Check whether the previous value for a table store is non-nil.
893
** This can be derived either from a previous store or from a previous
894
** load (because all loads from tables perform a type check).
895
**
896
** The result of the analysis can be used to avoid the metatable check
897
** and the guard against HREF returning niltv. Both of these are cheap,
898
** so let's not spend too much effort on the analysis.
899
**
900
** A result of 1 is exact: previous value CANNOT be nil.
901
** A result of 0 is inexact: previous value MAY be nil.
902
*/
903
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
40,741✔
904
{
905
  /* First check stores. */
906
  IRRef ref = J->chain[loadop+IRDELTA_L2S];
40,741✔
907
  while (ref > xref) {
40,985✔
908
    IRIns *store = IR(ref);
2,157✔
909
    if (store->op1 == xref) {  /* Same xREF. */
2,157✔
910
      /* A nil store MAY alias, but a non-nil store MUST alias. */
911
      return !irt_isnil(store->t);
1,913✔
912
    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
244✔
913
      IRRef skref = IR(store->op1)->op2;
10✔
914
      IRRef xkref = IR(xref)->op2;
10✔
915
      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
916
      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
10✔
917
        if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
10✔
918
          return 0;  /* A nil store with same const key or var key MAY alias. */
919
        /* Different const keys CANNOT alias. */
920
      }  /* Different key types CANNOT alias. */
921
    }  /* Other non-nil stores MAY alias. */
922
    ref = store->prev;
244✔
923
  }
924

925
  /* Check loads since nothing could be derived from stores. */
926
  ref = J->chain[loadop];
38,828✔
927
  while (ref > xref) {
39,018✔
928
    IRIns *load = IR(ref);
460✔
929
    if (load->op1 == xref) {  /* Same xREF. */
460✔
930
      /* A nil load MAY alias, but a non-nil load MUST alias. */
931
      return !irt_isnil(load->t);
270✔
932
    }  /* Other non-nil loads MAY alias. */
933
    ref = load->prev;
190✔
934
  }
935
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
936
}
937

938
/* ------------------------------------------------------------------------ */
939

940
#undef IR
941
#undef fins
942
#undef fleft
943
#undef fright
944

945
#endif
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc