tarantool / luajit · build 6035198545 (push via GitHub)

31 Aug 2023 08:55AM UTC coverage: 88.225% (+0.4%) from 87.822%

Commit by fckxorg:
test: don't skip tool CLI flag for tarantool

That skipcond was introduced to overcome the obstacles of LuaJIT's
integration testing in Tarantool. Since the required patch has landed
in the Tarantool master branch, the skipcond is no longer necessary.

Related to tarantool/tarantool#5688

5340 of 5975 branches covered (89.37%)

Branch coverage included in aggregate %.

20495 of 23308 relevant lines covered (87.93%)

1297339.67 hits per line

Source File: /src/lj_trace.c (97.77% covered)
/*
** Trace management.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_trace_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_err.h"
#include "lj_debug.h"
#include "lj_str.h"
#include "lj_frame.h"
#include "lj_state.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_gdbjit.h"
#include "lj_record.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"
#include "lj_target.h"
#if LJ_HASMEMPROF
#include "lj_memprof.h"
#endif
#if LJ_HASSYSPROF
#include "lj_sysprof.h"
#endif

/* -- Error handling ------------------------------------------------------ */

/* Synchronous abort with error message. */
void lj_trace_err(jit_State *J, TraceError e)
{
  setnilV(&J->errinfo);  /* No error info. */
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* Synchronous abort with error message and error info. */
void lj_trace_err_info(jit_State *J, TraceError e)
{
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* -- Trace management ---------------------------------------------------- */

/* The current trace is first assembled in J->cur. The variable length
** arrays point to shared, growable buffers (J->irbuf etc.). When trace
** recording ends successfully, the current trace and its data structures
** are copied to a new (compact) GCtrace object.
*/

/* Find a free trace number. */
static TraceNo trace_findfree(jit_State *J)
{
  MSize osz, lim;
  if (J->freetrace == 0)
    J->freetrace = 1;
  for (; J->freetrace < J->sizetrace; J->freetrace++)
    if (traceref(J, J->freetrace) == NULL)
      return J->freetrace++;
  /* Need to grow trace array. */
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
  osz = J->sizetrace;
  if (osz >= lim)
    return 0;  /* Too many traces. */
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
  for (; osz < J->sizetrace; osz++)
    setgcrefnull(J->trace[osz]);
  return J->freetrace;
}

#define TRACE_APPENDVEC(field, szfield, tp) \
  T->field = (tp *)p; \
  memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
  p += J->cur.szfield*sizeof(tp);

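/* For reference, one expansion of the macro above: used as
** TRACE_APPENDVEC(snap, nsnap, SnapShot) in trace_save() below, it
** appends the snapshot vector of the trace being saved and becomes:
**
**   T->snap = (SnapShot *)p;
**   memcpy(p, J->cur.snap, J->cur.nsnap*sizeof(SnapShot));
**   p += J->cur.nsnap*sizeof(SnapShot);
*/
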
#ifdef LUAJIT_USE_PERFTOOLS
/*
** Create symbol table of JIT-compiled code. For use with Linux perf tools.
** Example usage:
**   perf record -f -e cycles luajit test.lua
**   perf report -s symbol
**   rm perf.data /tmp/perf-*.map
*/
#include <stdio.h>
#include <unistd.h>

static void perftools_addtrace(GCtrace *T)
{
  static FILE *fp;
  GCproto *pt = &gcref(T->startpt)->pt;
  const BCIns *startpc = mref(T->startpc, const BCIns);
  const char *name = proto_chunknamestr(pt);
  BCLine lineno;
  if (name[0] == '@' || name[0] == '=')
    name++;
  else
    name = "(string)";
  lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
             "trace PC out of range");
  lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
  if (!fp) {
    char fname[40];
    sprintf(fname, "/tmp/perf-%d.map", getpid());
    if (!(fp = fopen(fname, "w"))) return;
    setlinebuf(fp);
  }
  fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
          (long)T->mcode, T->szmcode, T->traceno, name, lineno);
}
#endif

/* Allocate space for copy of T. */
GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);
  size_t sz = sztr + szins +
              T->nsnap*sizeof(SnapShot) +
              T->nsnapmap*sizeof(SnapEntry);
  GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
  char *p = (char *)T2 + sztr;
  T2->gct = ~LJ_TTRACE;
  T2->marked = 0;
  T2->traceno = 0;
  T2->ir = (IRIns *)p - T->nk;
  T2->nins = T->nins;
  T2->nk = T->nk;
  T2->nsnap = T->nsnap;
  T2->nsnapmap = T->nsnapmap;
  memcpy(p, T->ir + T->nk, szins);
  L2J(L)->tracenum++;
  return T2;
}

/* Save current trace by copying and compacting it. */
static void trace_save(jit_State *J, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
  char *p = (char *)T + sztr;
  memcpy(T, &J->cur, sizeof(GCtrace));
  setgcrefr(T->nextgc, J2G(J)->gc.root);
  setgcrefp(J2G(J)->gc.root, T);
  newwhite(J2G(J), T);
  T->gct = ~LJ_TTRACE;
  T->ir = (IRIns *)p - J->cur.nk;  /* The IR has already been copied above. */
  p += szins;
  TRACE_APPENDVEC(snap, nsnap, SnapShot)
  TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
  J->cur.traceno = 0;
  J->curfinal = NULL;
  setgcrefp(J->trace[T->traceno], T);
  lj_gc_barriertrace(J2G(J), T->traceno);
  lj_gdbjit_addtrace(J, T);
#ifdef LUAJIT_USE_PERFTOOLS
  perftools_addtrace(T);
#endif

  /* Add a new trace to the profiler. */
#if LJ_HASMEMPROF
  lj_memprof_add_trace(T);
#endif

#if LJ_HASSYSPROF
  lj_sysprof_add_trace(T);
#endif
}

void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
{
  jit_State *J = G2J(g);
  if (T->traceno) {
    lj_gdbjit_deltrace(J, T);
    if (T->traceno < J->freetrace)
      J->freetrace = T->traceno;
    setgcrefnull(J->trace[T->traceno]);
  }
  lj_mem_free(g, T,
    ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
    T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
  J->tracenum--;
}

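/* A note on the J->freetrace bookkeeping above: it is a lower bound for
** the first free slot in J->trace, so trace_findfree() can resume its
** scan there instead of from slot 1. Freeing a trace with a smaller
** number (here) or aborting one (in trace_abort() below) lowers the
** bound again.
*/
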
/* Re-enable compiling a prototype by unpatching any modified bytecode. */
void lj_trace_reenableproto(GCproto *pt)
{
  if ((pt->flags & PROTO_ILOOP)) {
    BCIns *bc = proto_bc(pt);
    BCPos i, sizebc = pt->sizebc;
    pt->flags &= ~PROTO_ILOOP;
    if (bc_op(bc[0]) == BC_IFUNCF)
      setbc_op(&bc[0], BC_FUNCF);
    for (i = 1; i < sizebc; i++) {
      BCOp op = bc_op(bc[i]);
      if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
        setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
    }
  }
}

/* Unpatch the bytecode modified by a root trace. */
static void trace_unpatch(jit_State *J, GCtrace *T)
{
  BCOp op = bc_op(T->startins);
  BCIns *pc = mref(T->startpc, BCIns);
  UNUSED(J);
  if (op == BC_JMP)
    return;  /* No need to unpatch branches in parent traces (yet). */
  switch (bc_op(*pc)) {
  case BC_JFORL:
    lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
    *pc = T->startins;
    pc += bc_j(T->startins);
    lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
    setbc_op(pc, BC_FORI);
    break;
  case BC_JITERL:
  case BC_JLOOP:
    lj_assertJ(op == BC_ITERL || op == BC_LOOP || bc_isret(op),
               "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  case BC_JMP:
    lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op);
    pc += bc_j(*pc)+2;
    if (bc_op(*pc) == BC_JITERL) {
      lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace");
      *pc = T->startins;
    }
    break;
  case BC_JFUNCF:
    lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  default:  /* Already unpatched. */
    break;
  }
}

/* Flush a root trace. */
static void trace_flushroot(jit_State *J, GCtrace *T)
{
  GCproto *pt = &gcref(T->startpt)->pt;
  lj_assertJ(T->root == 0, "not a root trace");
  lj_assertJ(pt != NULL, "trace has no prototype");
  /* First unpatch any modified bytecode. */
  trace_unpatch(J, T);
  /* Unlink root trace from chain anchored in prototype. */
  if (pt->trace == T->traceno) {  /* Trace is first in chain. Easy. */
    pt->trace = T->nextroot;
  } else if (pt->trace) {  /* Otherwise search in chain of root traces. */
    GCtrace *T2 = traceref(J, pt->trace);
    if (T2) {
      for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
        if (T2->nextroot == T->traceno) {
          T2->nextroot = T->nextroot;  /* Unlink from chain. */
          break;
        }
    }
  }
}

/* Flush a trace. Only root traces are considered. */
void lj_trace_flush(jit_State *J, TraceNo traceno)
{
  if (traceno > 0 && traceno < J->sizetrace) {
    GCtrace *T = traceref(J, traceno);
    if (T && T->root == 0)
      trace_flushroot(J, T);
  }
}

/* Flush all traces associated with a prototype. */
void lj_trace_flushproto(global_State *g, GCproto *pt)
{
  while (pt->trace != 0)
    trace_flushroot(G2J(g), traceref(G2J(g), pt->trace));
}

/* Flush all traces. */
int lj_trace_flushall(lua_State *L)
{
  jit_State *J = L2J(L);
  ptrdiff_t i;
  if ((J2G(J)->hookmask & HOOK_GC))
    return 1;
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
    GCtrace *T = traceref(J, i);
    if (T) {
      if (T->root == 0)
        trace_flushroot(J, T);
      lj_gdbjit_deltrace(J, T);
      T->traceno = T->link = 0;  /* Blacklist the link for cont_stitch. */
      setgcrefnull(J->trace[i]);
    }
  }
  J->cur.traceno = 0;
  J->freetrace = 0;
  /* Clear penalty cache. */
  memset(J->penalty, 0, sizeof(J->penalty));
  /* Free the whole machine code and invalidate all exit stub groups. */
  lj_mcode_free(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
  );
  return 0;
}

/* Initialize JIT compiler state. */
void lj_trace_initstate(global_State *g)
{
  jit_State *J = G2J(g);
  TValue *tv;

  /* Initialize aligned SIMD constants. */
  tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
  tv[0].u64 = U64x(7fffffff,ffffffff);
  tv[1].u64 = U64x(7fffffff,ffffffff);
  tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
  tv[0].u64 = U64x(80000000,00000000);
  tv[1].u64 = U64x(80000000,00000000);

  /* Initialize 32/64 bit constants. */
#if LJ_TARGET_X86ORX64
  J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);
#if LJ_32
  J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);
#endif
  J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);
  J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
#endif
#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
  J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);
#endif
#if LJ_TARGET_PPC
  J->k32[LJ_K32_2P52_2P31] = 0x59800004;
  J->k32[LJ_K32_2P52] = 0x59800000;
#endif
#if LJ_TARGET_PPC || LJ_TARGET_MIPS
  J->k32[LJ_K32_2P31] = 0x4f000000;
#endif
#if LJ_TARGET_MIPS
  J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);
#if LJ_64
  J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);
  J->k32[LJ_K32_2P63] = 0x5f000000;
  J->k32[LJ_K32_M2P64] = 0xdf800000;
#endif
#endif
}

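/* The LJ_KSIMD_ABS/LJ_KSIMD_NEG constants set up above are the usual
** IEEE-754 sign-bit masks, replicated across both SIMD lanes:
** x & U64x(7fffffff,ffffffff) clears the sign bit (fabs), while
** x ^ U64x(80000000,00000000) flips it (negation).
*/
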
/* Free everything associated with the JIT compiler state. */
void lj_trace_freestate(global_State *g)
{
  jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
  {  /* This assumes all traces have already been freed. */
    ptrdiff_t i;
    for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
      lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
                 "trace still allocated");
  }
#endif
  lj_mcode_free(J);
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
  lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
}

/* -- Penalties and blacklisting ------------------------------------------ */

/* Blacklist a bytecode instruction. */
static void blacklist_pc(GCproto *pt, BCIns *pc)
{
  setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
  pt->flags |= PROTO_ILOOP;
}

/* Penalize a bytecode instruction. */
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
{
  uint32_t i, val = PENALTY_MIN;
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {  /* Cache slot found? */
      /* First try to bump its hotcount several times. */
      val = ((uint32_t)J->penalty[i].val << 1) +
            LJ_PRNG_BITS(J, PENALTY_RNDBITS);
      if (val > PENALTY_MAX) {
        blacklist_pc(pt, pc);  /* Blacklist it, if that didn't help. */
        return;
      }
      goto setpenalty;
    }
  /* Assign a new penalty cache slot. */
  i = J->penaltyslot;
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
  setmref(J->penalty[i].pc, pc);
setpenalty:
  J->penalty[i].val = (uint16_t)val;
  J->penalty[i].reason = e;
  hotcount_set(J2GG(J), pc+1, val);
}

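/* A worked example of the escalation above, in terms of the PENALTY_*
** constants from lj_jit.h: the first abort at a pc seeds val with
** PENALTY_MIN; every further abort of the same pc roughly doubles it
** (val = 2*val plus a few PRNG bits against resonance), so val after k
** aborts is about PENALTY_MIN * 2^k. Once val > PENALTY_MAX, i.e. after
** roughly log2(PENALTY_MAX/PENALTY_MIN) consecutive aborts, the bytecode
** is blacklisted for good via blacklist_pc().
*/
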
/* -- Trace compiler state machine ---------------------------------------- */

/* Start tracing. */
static void trace_start(jit_State *J)
{
  lua_State *L;
  TraceNo traceno;

  if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
    if (J->parent == 0 && J->exitno == 0) {
      /* Lazy bytecode patching to disable hotcount events. */
      lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
                 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
                 "bad hot bytecode %d", bc_op(*J->pc));
      setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
      J->pt->flags |= PROTO_ILOOP;
    }
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Get a new trace number. */
  traceno = trace_findfree(J);
  if (LJ_UNLIKELY(traceno == 0)) {  /* No free trace? */
    lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
               "recorder called from GC hook");
    lj_trace_flushall(J->L);
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }
  setgcrefp(J->trace[traceno], &J->cur);

  /* Setup enough of the current trace to be able to send the vmevent. */
  memset(&J->cur, 0, sizeof(GCtrace));
  J->cur.traceno = traceno;
  J->cur.nins = J->cur.nk = REF_BASE;
  J->cur.ir = J->irbuf;
  J->cur.snap = J->snapbuf;
  J->cur.snapmap = J->snapmapbuf;
  J->mergesnap = 0;
  J->needsnap = 0;
  J->bcskip = 0;
  J->guardemit.irt = 0;
  J->postproc = LJ_POST_NONE;
  lj_resetsplit(J);
  J->retryrec = 0;
  J->ktrace = 0;
  setgcref(J->cur.startpt, obj2gco(J->pt));

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
    setintV(L->top++, proto_bcpos(J->pt, J->pc));
    if (J->parent) {
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
    } else {
      BCOp op = bc_op(*J->pc);
      if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
        setintV(L->top++, J->exitno);  /* Parent of stitched trace. */
        setintV(L->top++, -1);
      }
    }
  );
  lj_record_setup(J);
}

/* Stop tracing. */
static void trace_stop(jit_State *J)
{
  BCIns *pc = mref(J->cur.startpc, BCIns);
  BCOp op = bc_op(J->cur.startins);
  GCproto *pt = &gcref(J->cur.startpt)->pt;
  TraceNo traceno = J->cur.traceno;
  GCtrace *T = J->curfinal;
  lua_State *L;

  switch (op) {
  case BC_FORL:
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
    /* fallthrough */
  case BC_LOOP:
  case BC_ITERL:
  case BC_FUNCF:
    /* Patch bytecode of starting instruction in root trace. */
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
    setbc_d(pc, traceno);
  addroot:
    /* Add to root trace chain in prototype. */
    J->cur.nextroot = pt->trace;
    pt->trace = (TraceNo1)traceno;
    break;
  case BC_RET:
  case BC_RET0:
  case BC_RET1:
    *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
    goto addroot;
  case BC_JMP:
    /* Patch exit branch in parent to side trace entry. */
    lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
    lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
    traceref(J, J->parent)->snap[J->exitno].count = SNAPCOUNT_DONE;
    /* Add to side trace chain in root trace. */
    {
      GCtrace *root = traceref(J, J->cur.root);
      root->nchild++;
      J->cur.nextside = root->nextside;
      root->nextside = (TraceNo1)traceno;
    }
    break;
  case BC_CALLM:
  case BC_CALL:
  case BC_ITERC:
    /* Trace stitching: patch link of previous trace. */
    traceref(J, J->exitno)->link = traceno;
    break;
  default:
    lj_assertJ(0, "bad stop bytecode %d", op);
    break;
  }

  /* Commit new mcode only after all patching is done. */
  lj_mcode_commit(J, J->cur.mcode);
  J->postproc = LJ_POST_NONE;
  trace_save(J, T);

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
  );
}

/* Start a new root trace for down-recursion. */
static int trace_downrec(jit_State *J)
{
  /* Restart recording at the return instruction. */
  lj_assertJ(J->pt != NULL, "no active prototype");
  lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
  if (bc_op(*J->pc) == BC_RETM) {
    J->ntraceabort++;
    return 0;  /* NYI: down-recursion with RETM. */
  }
  J->parent = 0;
  J->exitno = 0;
  J->state = LJ_TRACE_RECORD;
  trace_start(J);
  return 1;
}

/* Abort tracing. */
static int trace_abort(jit_State *J)
{
  lua_State *L = J->L;
  TraceError e = LJ_TRERR_RECERR;
  TraceNo traceno;

  J->postproc = LJ_POST_NONE;
  lj_mcode_abort(J);
  if (J->curfinal) {
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL;
  }
  if (tvisnumber(L->top-1))
    e = (TraceError)numberVint(L->top-1);
  if (e == LJ_TRERR_MCODELM) {
    L->top--;  /* Remove error object */
    J->state = LJ_TRACE_ASM;
    return 1;  /* Retry ASM with new MCode area. */
  }
  /* Penalize or blacklist starting bytecode instruction. */
  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
    if (J->exitno == 0) {
      BCIns *startpc = mref(J->cur.startpc, BCIns);
      if (e == LJ_TRERR_RETRY)
        hotcount_set(J2GG(J), startpc+1, 1);  /* Immediate retry. */
      else
        penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
    } else {
      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
    }
  }

  /* Is there anything to abort? */
  traceno = J->cur.traceno;
  if (traceno) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    J->cur.link = 0;
    J->cur.linktype = LJ_TRLINK_NONE;
    lj_vmevent_send(L, TRACE,
      TValue *frame;
      const BCIns *pc;
      GCfunc *fn;
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
      setintV(L->top++, traceno);
      /* Find original Lua function call to generate a better error message. */
      frame = J->L->base-1;
      pc = J->pc;
      while (!isluafunc(frame_func(frame))) {
        pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
        frame = frame_prev(frame);
      }
      fn = frame_func(frame);
      setfuncV(L, L->top++, fn);
      setintV(L->top++, proto_bcpos(funcproto(fn), pc));
      copyTV(L, L->top++, restorestack(L, errobj));
      copyTV(L, L->top++, &J->errinfo);
    );
    /* Drop aborted trace after the vmevent (which may still access it). */
    setgcrefnull(J->trace[traceno]);
    if (traceno < J->freetrace)
      J->freetrace = traceno;
    J->cur.traceno = 0;
  }
  L->top--;  /* Remove error object */
  if (e == LJ_TRERR_DOWNREC)
    return trace_downrec(J);
  else if (e == LJ_TRERR_MCODEAL)
    lj_trace_flushall(L);
  J->ntraceabort++;
  return 0;
}

/* Perform pending re-patch of a bytecode instruction. */
static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
{
  if (LJ_UNLIKELY(J->patchpc)) {
    if (force || J->bcskip == 0) {
      *J->patchpc = J->patchins;
      J->patchpc = NULL;
    } else {
      J->bcskip = 0;
    }
  }
}

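/* Rough state flow of the machine below: LJ_TRACE_START sets up a trace
** and proceeds to LJ_TRACE_RECORD, which records bytecodes one at a time;
** a finished recording goes through LJ_TRACE_END (optimization) to
** LJ_TRACE_ASM (assembly) and back to LJ_TRACE_IDLE. Any error lands in
** LJ_TRACE_ERR, where trace_abort() either retries (e.g. LJ_TRERR_MCODELM
** re-enters LJ_TRACE_ASM, down-recursion restarts recording) or gives up
** and returns to the interpreter.
*/
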
/* State machine for the trace compiler. Protected callback. */
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
  jit_State *J = (jit_State *)ud;
  UNUSED(dummy);
  do {
  retry:
    switch (J->state) {
    case LJ_TRACE_START:
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
      trace_start(J);
      lj_dispatch_update(J2G(J));
      break;

    case LJ_TRACE_RECORD:
      trace_pendpatch(J, 0);
      setvmstate(J2G(J), RECORD);
      lj_vmevent_send_(L, RECORD,
        /* Save/restore state for trace recorder. */
        TValue savetv = J2G(J)->tmptv;
        TValue savetv2 = J2G(J)->tmptv2;
        TraceNo parent = J->parent;
        ExitNo exitno = J->exitno;
        setintV(L->top++, J->cur.traceno);
        setfuncV(L, L->top++, J->fn);
        setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
        setintV(L->top++, J->framedepth);
      ,
        J2G(J)->tmptv = savetv;
        J2G(J)->tmptv2 = savetv2;
        J->parent = parent;
        J->exitno = exitno;
      );
      lj_record_ins(J);
      break;

    case LJ_TRACE_END:
      trace_pendpatch(J, 1);
      J->loopref = 0;
      if ((J->flags & JIT_F_OPT_LOOP) &&
          J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
        setvmstate(J2G(J), OPT);
        lj_opt_dce(J);
        if (lj_opt_loop(J)) {  /* Loop optimization failed? */
          J->cur.link = 0;
          J->cur.linktype = LJ_TRLINK_NONE;
          J->loopref = J->cur.nins;
          J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
          break;
        }
        J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
      }
      lj_opt_split(J);
      lj_opt_sink(J);
      if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
      J->state = LJ_TRACE_ASM;
      break;

    case LJ_TRACE_ASM:
      setvmstate(J2G(J), ASM);
      lj_asm_trace(J, &J->cur);
      trace_stop(J);
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;

    default:  /* Trace aborted asynchronously. */
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
      /* fallthrough */
    case LJ_TRACE_ERR:
      trace_pendpatch(J, 1);
      if (trace_abort(J))
        goto retry;
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;
    }
  } while (J->state > LJ_TRACE_RECORD);
  return NULL;
}

/* -- Event handling ------------------------------------------------------ */

/* A bytecode instruction is about to be executed. Record it. */
void lj_trace_ins(jit_State *J, const BCIns *pc)
{
  /* Note: J->L must already be set. pc is the true bytecode PC here. */
  J->pc = pc;
  J->fn = curr_func(J->L);
  J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
  while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
    J->state = LJ_TRACE_ERR;
}

/* A hotcount triggered. Start recording a root trace. */
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
{
  /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
  ERRNO_SAVE
  /* Reset hotcount. */
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Root trace. */
    J->exitno = 0;
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc-1);
  }
  ERRNO_RESTORE
}

/* Check for a hot side exit. If yes, start recording a side trace. */
static void trace_hotside(jit_State *J, const BCIns *pc)
{
  SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
      isluafunc(curr_func(J->L)) &&
      snap->count != SNAPCOUNT_DONE &&
      ++snap->count >= J->param[JIT_P_hotexit]) {
    lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
    /* J->parent is non-zero for a side trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}

/* Stitch a new trace to the previous trace. */
void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
{
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Have to treat it like a root trace. */
    /* J->exitno is set to the invoking trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}

/* Tiny struct to pass data to protected call. */
typedef struct ExitDataCP {
  jit_State *J;
  void *exptr;          /* Pointer to exit state. */
  const BCIns *pc;      /* Restart interpreter at this PC. */
} ExitDataCP;

/* Need to protect lj_snap_restore because it may throw. */
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
  ExitDataCP *exd = (ExitDataCP *)ud;
  /* Always catch error here and don't call error function. */
  cframe_errfunc(L->cframe) = 0;
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
  exd->pc = lj_snap_restore(exd->J, exd->exptr);
  UNUSED(dummy);
  return NULL;
}

#ifndef LUAJIT_DISABLE_VMEVENT
/* Push all registers from exit state. */
static void trace_exit_regs(lua_State *L, ExitState *ex)
{
  int32_t i;
  setintV(L->top++, RID_NUM_GPR);
  setintV(L->top++, RID_NUM_FPR);
  for (i = 0; i < RID_NUM_GPR; i++) {
    if (sizeof(ex->gpr[i]) == sizeof(int32_t))
      setintV(L->top++, (int32_t)ex->gpr[i]);
    else
      setnumV(L->top++, (lua_Number)ex->gpr[i]);
  }
#if !LJ_SOFTFP
  for (i = 0; i < RID_NUM_FPR; i++) {
    setnumV(L->top, ex->fpr[i]);
    if (LJ_UNLIKELY(tvisnan(L->top)))
      setnanV(L->top);
    L->top++;
  }
#endif
}
#endif

#if defined(EXITSTATE_PCREG) || (LJ_UNWIND_JIT && !EXITTRACE_VMSTATE)
/* Determine trace number from pc of exit instruction. */
static TraceNo trace_exit_find(jit_State *J, MCode *pc)
{
  TraceNo traceno;
  for (traceno = 1; traceno < J->sizetrace; traceno++) {
    GCtrace *T = traceref(J, traceno);
    if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
      return traceno;
  }
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif

/* A trace exited. Restore interpreter state. */
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
  ERRNO_SAVE
  lua_State *L = J->L;
  ExitState *ex = (ExitState *)exptr;
  ExitDataCP exd;
  int errcode, exitcode = J->exitcode;
  TValue exiterr;
  const BCIns *pc;
  void *cf;
  GCtrace *T;

  setnilV(&exiterr);
  if (exitcode) {  /* Trace unwound with error code. */
    J->exitcode = 0;
    copyTV(L, &exiterr, L->top-1);
  }

#ifdef EXITSTATE_PCREG
  J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
#endif
  T = traceref(J, J->parent); UNUSED(T);
#ifdef EXITSTATE_CHECKEXIT
  if (J->exitno == T->nsnap) {  /* Treat stack check like a parent exit. */
    lj_assertJ(T->root != 0, "stack check in root trace");
    J->exitno = T->ir[REF_BASE].op2;
    J->parent = T->ir[REF_BASE].op1;
    T = traceref(J, J->parent);
  }
#endif
  lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
  exd.J = J;
  exd.exptr = exptr;
  errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
  if (errcode)
    return -errcode;  /* Return negated error code. */

  if (exitcode) copyTV(L, L->top++, &exiterr);  /* Anchor the error object. */

  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
    lj_vmevent_send(L, TEXIT,
      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
      trace_exit_regs(L, ex);
    );

  pc = exd.pc;
  cf = cframe_raw(L->cframe);
  setcframe_pc(cf, pc);
  if (exitcode) {
    return -exitcode;
  } else if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
    /* Just exit to interpreter. */
  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
    if (!(G(L)->hookmask & HOOK_GC))
      lj_gc_step(L);  /* Exited because of GC: drive GC forward. */
  } else {
    trace_hotside(J, pc);
  }
  if (bc_op(*pc) == BC_JLOOP) {
    BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
    if (bc_isret(bc_op(*retpc))) {
      if (J->state == LJ_TRACE_RECORD) {
        J->patchins = *pc;
        J->patchpc = (BCIns *)pc;
        *J->patchpc = *retpc;
        J->bcskip = 1;
      } else {
        pc = retpc;
        setcframe_pc(cf, pc);
      }
    }
  }
  /* Return MULTRES or 0. */
  ERRNO_RESTORE
  switch (bc_op(*pc)) {
  case BC_CALLM: case BC_CALLMT:
    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
  case BC_RETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
  case BC_TSETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
  default:
    if (bc_op(*pc) >= BC_FUNCF)
      return (int)((BCReg)(L->top - L->base) + 1);
    return 0;
  }
}

#if LJ_UNWIND_JIT
/* Given an mcode address determine trace exit address for unwinding. */
uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep)
{
#if EXITTRACE_VMSTATE
  TraceNo traceno = J2G(J)->vmstate;
#else
  TraceNo traceno = trace_exit_find(J, (MCode *)addr);
#endif
  GCtrace *T = traceref(J, traceno);
  if (T
#if EXITTRACE_VMSTATE
      && addr >= (uintptr_t)T->mcode && addr < (uintptr_t)T->mcode + T->szmcode
#endif
     ) {
    SnapShot *snap = T->snap;
    SnapNo lo = 0, exitno = T->nsnap;
    uintptr_t ofs = (uintptr_t)((MCode *)addr - T->mcode);  /* MCode units! */
    /* Rightmost binary search for mcode offset to determine exit number. */
    do {
      SnapNo mid = (lo+exitno) >> 1;
      if (ofs < snap[mid].mcofs) exitno = mid; else lo = mid + 1;
    } while (lo < exitno);
    exitno--;
    *ep = exitno;
#ifdef EXITSTUBS_PER_GROUP
    return (uintptr_t)exitstub_addr(J, exitno);
#else
    return (uintptr_t)exitstub_trace_addr(T, exitno);
#endif
  }
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif

#endif