tarantool / luajit / 15440738150

04 Jun 2025 11:09AM UTC coverage: 93.028% (+0.02%) from 93.008%
Build 15440738150 · push · github · ligurio

sysprof: allow calling sysprof.report before stop

Calling `sysprof.report()` without stopping the profiler first is currently
not allowed. However, it is sometimes useful to inspect the numbers in the
report while the profiler keeps running. The patch removes the corresponding
check and allows reporting without stopping.

Resolves tarantool/tarantool#11229
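
For context, a minimal usage sketch of what the patch enables, assuming the
`misc.sysprof` API shipped with tarantool/luajit (option values and the
inspected fields below are illustrative, not taken from the patch):

  local sysprof = misc.sysprof
  assert(sysprof.start({ mode = "D" }))  -- start sampling
  -- ... run the workload under investigation ...
  local rep = sysprof.report()           -- now allowed while the profiler is still running
  -- inspect the counters in `rep` here, then stop as usual
  assert(sysprof.stop())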

5705 of 6041 branches covered (94.44%)

Branch coverage included in aggregate %.

21767 of 23490 relevant lines covered (92.66%)

3829584.94 hits per line
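
As a sanity check of the aggregate figure, assuming Coveralls simply pools line
and branch hits, the combined ratio reproduces the percentage reported above:

  (21767 + 5705) / (23490 + 6041) = 27472 / 29531 ≈ 93.028%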

Source File: /src/lj_gc.c (99.02% coverage)

/*
** Garbage collector.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/

#define lj_gc_c
#define LUA_CORE

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_buf.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_func.h"
#include "lj_udata.h"
#include "lj_meta.h"
#include "lj_state.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif
#include "lj_trace.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"

#define GCSTEPSIZE      1024u
#define GCSWEEPMAX      40
#define GCSWEEPCOST     10
#define GCFINALIZECOST  100

/* Macros to set GCobj colors and flags. */
#define white2gray(x)   ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)   ((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)  ((u)->marked & LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
               "TValue and GC type mismatch"); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. */
#define gc_mark_str(s)  ((s)->marked &= (uint8_t)~LJ_GC_WHITES)

/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;
  lj_assertG(iswhite(o), "mark of non-white object");
  lj_assertG(!isdead(g, o), "mark of dead object");
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
               "bad GC type %d", gct);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Mark GC roots. */
static void gc_mark_gcroot(global_State *g)
{
  ptrdiff_t i;
  for (i = 0; i < GCROOT_MAX; i++)
    if (gcref(g->gcroot[i]) != NULL)
      gc_markobj(g, gcref(g->gcroot[i]));
}

/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);
  g->gc.state = GCSpropagate;
}

/* Mark open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
               "broken upvalue chain");
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));
  }
}

/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}

/* Separate userdata objects to be finalized to mmudata list. */
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  GCRef *p = &mainthread(g)->nextgc;
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(o);
      *p = o->gch.nextgc;
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
        GCobj *root = gcref(g->gc.mmudata);
        setgcrefr(o->gch.nextgc, root->gch.nextgc);
        setgcref(root->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
        setgcref(o->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}

/* -- Propagation phase --------------------------------------------------- */

/* Traverse a table. */
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
    }
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
#if LJ_HASFFI
      if (gcref(g->gcroot[GCROOT_FFI_FIN]) == obj2gco(t)) {
        weak = (int)(~0u & ~LJ_GC_WEAKVAL);
      } else
#endif
      {
        t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
        setgcrefr(t->gclist, g->gc.weak);
        setgcref(g->gc.weak, obj2gco(t));
      }
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
        lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
        if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
        if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}

/* Traverse a function. */
static void gc_traverse_func(global_State *g, GCfunc *fn)
{
  gc_markobj(g, tabref(fn->c.env));
  if (isluafunc(fn)) {
    uint32_t i;
    lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
               "function upvalues out of range");
    gc_markobj(g, funcproto(fn));
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
  } else {
    uint32_t i;
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
      gc_marktv(g, &fn->c.upvalue[i]);
  }
}

#if LJ_HASJIT
/* Mark a trace. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
      ref++;
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)  gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)  UNUSED(g)
#endif

/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);
#endif
}

/* Traverse the frame structure of a stack. */
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}

/* Traverse a thread object. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}

/* Propagate one gray object. Traverse it and turn it black. */
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lj_assertG(isgray(o), "propagation of non-gray object");
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
                           (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
                           sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lj_assertG(0, "bad GC type %d", gct);
    return 0;
#endif
  }
}

/* Propagate all gray objects. */
static size_t gc_propagate_gray(global_State *g)
{
  size_t m = 0;
  while (gcref(g->gc.gray) != NULL)
    m += propagatemark(g);
  return m;
}

/* -- Sweep phase --------------------------------------------------------- */

/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};

/* Full sweep of a GC list. */
#define gc_fullsweep(g, p)  gc_sweep(g, (p), ~(uint32_t)0)

/* Partial sweep of a GC list. */
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
                 "sweep of undead object");
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
                 "sweep of unlive object");
      setgcrefr(*p, o->gch.nextgc);
      if (o == gcref(g->gc.root))
        setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    }
  }
  return p;
}

/* Full sweep of a string chain. */
static GCRef *gc_sweep_str_chain(global_State *g, GCRef *p)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
                 "sweep of undead string");
      makewhite(g, o);  /* Value is alive, change to the current white. */
#if LUAJIT_SMART_STRINGS
      if (strsmart(&o->str)) {
        /* must match lj_str_new */
        bloomset(g->strbloom.next[0], o->str.hash >> (sizeof(o->str.hash)*8-6));
        bloomset(g->strbloom.next[1], o->str.strflags);
      }
#endif
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
                 "sweep of unlive string");
      setgcrefr(*p, o->gch.nextgc);
      lj_str_free(g, &o->str);
    }
  }
  return p;
}

/* Check whether we can clear a key or a value slot from a table. */
static int gc_mayclear(cTValue *o, int val)
{
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
      gc_mark_str(strV(o));  /* And need to be marked. */
      return 0;
    }
    if (iswhite(gcV(o)))
      return 1;  /* Object is about to be collected. */
    if (tvisudata(o) && val && isfinalized(udataV(o)))
      return 1;  /* Finalized userdata is dropped only from values. */
  }
  return 0;  /* Cannot clear. */
}

/* Clear collected entries from weak tables. */
static void gc_clearweak(global_State *g, GCobj *o)
{
  UNUSED(g);
  while (o) {
    GCtab *t = gco2tab(o);
    lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
        /* Clear array slot when value is about to be collected. */
        TValue *tv = arrayslot(t, i);
        if (gc_mayclear(tv, 1))
          setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
        Node *n = &node[i];
        /* Clear hash slot when key or value is about to be collected. */
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
                                  gc_mayclear(&n->val, 1)))
          setnilV(&n->val);
      }
    }
    o = gcref(t->gclist);
  }
}

/* Call a userdata or cdata finalizer. */
static void gc_call_finalizer(global_State *g, lua_State *L,
                              cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  GCSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  top = L->top;
  copyTV(L, top++, mo);
  if (LJ_FR2) setnilV(top++);
  setgcV(L, top, o, ~o->gch.gct);
  L->top = top+1;
  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    lj_vmevent_send(L, ERRFIN,
      copyTV(L, L->top++, restorestack(L, errobj));
    );
    L->top--;
  }
}

/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));
  cTValue *mo;
  lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, tabref(g->gcroot[GCROOT_FFI_FIN]), &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}

/* Finalize all userdata objects from mmudata list. */
void lj_gc_finalize_udata(lua_State *L)
{
  while (gcref(G(L)->gc.mmudata) != NULL)
    gc_finalize(L);
}

#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table. */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  GCtab *t = tabref(g->gcroot[GCROOT_FFI_FIN]);
  Node *node = noderef(t->node);
  ptrdiff_t i;
  setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
  for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
    if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
      GCobj *o = gcV(&node[i].key);
      TValue tmp;
      makewhite(g, o);
      o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
      copyTV(L, &tmp, &node[i].val);
      setnilV(&node[i].val);
      gc_call_finalizer(g, L, &tmp, o);
    }
}
#endif

/* Free all remaining GC objects. */
void lj_gc_freeall(global_State *g)
{
  MSize i, strmask;
  /* Free everything, except super-fixed objects (the main thread). */
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
  gc_fullsweep(g, &g->gc.root);
  strmask = g->strmask;
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    gc_fullsweep(g, &g->strhash[i]);
}

/* -- Collector ----------------------------------------------------------- */

/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(g, gcref(g->gc.weak));

  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
}

/* GC state machine. Returns a cost estimate for each step performed. */
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  g->gc.state_count[g->gc.state]++;
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
#if LUAJIT_SMART_STRINGS
    g->strbloom.next[0] = 0;
    g->strbloom.next[1] = 0;
#endif
    return 0;
  case GCSsweepstring: {
    GCSize old = g->gc.total;
    gc_sweep_str_chain(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->strmask) {
      g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
#if LUAJIT_SMART_STRINGS
      g->strbloom.cur[0] = g->strbloom.next[0];
      g->strbloom.cur[1] = g->strbloom.next[1];
#endif
    }
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    return GCSWEEPCOST;
    }
  case GCSsweep: {
    GCSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
      if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
        lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
        g->gc.state = GCSfinalize;
#if LJ_HASFFI
        g->gc.nocdatafin = 1;
#endif
      } else {  /* Otherwise skip this phase to help the JIT. */
        g->gc.state = GCSpause;  /* End of GC cycle. */
        g->gc.debt = 0;
      }
    }
    return GCSWEEPMAX*GCSWEEPCOST;
    }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
        return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (g->gc.estimate > GCFINALIZECOST)
        g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    if (!g->gc.nocdatafin) lj_tab_rehash(L, tabref(g->gcroot[GCROOT_FFI_FIN]));
#endif
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lj_assertG(0, "bad GC state");
    return 0;
  }
}

/* Perform a limited amount of incremental GC steps. */
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  GCSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;
  if (g->gc.total > g->gc.threshold)
    g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (GCSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;
    g->vmstate = ostate;
    return 0;
  }
}

/* Ditto, but fix the stack top first. */
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
{
  if (curr_funcisL(L)) L->top = curr_topL(L);
  lj_gc_step(L);
}

/* Need to protect lj_gc_step because it may throw. */
static TValue *gc_step_jit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
  MSize steps = (MSize)(uintptr_t)ud;
  UNUSED(dummy);

  /* Always catch error here and don't call error function. */
  cframe_errfunc(L->cframe) = 0;
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);

  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;

  return NULL;
}

#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  int32_t ostate = g->vmstate;
  int errcode;
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  errcode = lj_vm_cpcall(L, NULL, (void *)(uintptr_t)steps, gc_step_jit_cp);
  g->vmstate = ostate;
  if (errcode)
    lj_err_throw(L, errcode);  /* Propagate errors. */
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif

/* Perform a full GC cycle. */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
             "bad GC state");
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}

/* -- Write barriers ------------------------------------------------------ */

/* Move the GC propagation frontier forward. */
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
             "bad object states for forward barrier");
  lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
             "bad GC state");
  lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}

/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}

/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
                 "bad GC state");
    }
  }
}

#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif

/* -- Allocator ----------------------------------------------------------- */

/* Call pluggable memory allocator to allocate or resize a fragment. */
void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
{
  global_State *g = G(L);
  lj_assertG((osz == 0) == (p == NULL), "realloc API violation");

  setgcref(g->mem_L, obj2gco(L));
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
  lj_assertG(checkptrGC(p),
             "allocated memory address %p outside required range", p);
  g->gc.total = (g->gc.total - osz) + nsz;
  g->gc.allocated += nsz;
  g->gc.freed += osz;
  return p;
}

/* Allocate new GC object and link it to the root set. */
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
{
  global_State *g = G(L);
  GCobj *o;

  setgcref(g->mem_L, obj2gco(L));
  o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lj_assertG(checkptrGC(o),
             "allocated memory address %p outside required range", o);
  g->gc.total += size;
  g->gc.allocated += size;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);
  return o;
}

/* Resize growable vector. */
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
{
  MSize sz = (*szp) << 1;
  if (sz < LJ_MIN_VECSZ)
    sz = LJ_MIN_VECSZ;
  if (sz > lim)
    sz = lim;
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
  *szp = sz;
  return p;
}
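For orientation, the collector entry points listed above (lj_gc_fullgc, lj_gc_step, and the
pause/stepmul knobs they read) are reached from Lua through the standard collectgarbage()
interface. A minimal sketch, using only the stock Lua 5.1 options that LuaJIT supports; the
numeric arguments are illustrative:

  collectgarbage("collect")          -- run a full cycle (ends up in lj_gc_fullgc)
  collectgarbage("step", 100)        -- perform a bounded amount of incremental work (lj_gc_step)
  collectgarbage("setstepmul", 200)  -- scales the per-step budget derived from GCSTEPSIZE and gc.stepmul
  collectgarbage("setpause", 200)    -- scales the threshold computed from gc.estimate and gc.pause
  print(collectgarbage("count") .. " KB in use")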