• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 6851160821

13 Nov 2023 02:06PM UTC coverage: 88.458% (-0.008%) from 88.466%
6851160821

push

github

igormunkin
Fix last commit.

Reported by PluMGMK.

(cherry-picked from commit 224129a8e)

The `_VMEVENTS` table, where the error handler for GC finalizers
is set, was not cleared from the stack after the initialization.
This commit adds stack cleanup.

Maxim Kokryashkin:
* added the description and the test for the problem

Part of tarantool/tarantool#9145

Reviewed-by: Sergey Kaplun <skaplun@tarantool.org>
Reviewed-by: Sergey Bronnikov <sergeyb@tarantool.org>
Signed-off-by: Igor Munkin <imun@tarantool.org>

5361 of 5976 branches covered (89.71%)

Branch coverage included in aggregate %.

1 of 1 new or added line in 1 file covered. (100.0%)

33 existing lines in 8 files now uncovered.

20551 of 23317 relevant lines covered (88.14%)

2746324.23 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

98.84
/src/lj_gc.c
1
/*
2
** Garbage collector.
3
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
4
**
5
** Major portions taken verbatim or adapted from the Lua interpreter.
6
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
7
*/
8

9
#define lj_gc_c
10
#define LUA_CORE
11

12
#include "lj_obj.h"
13
#include "lj_gc.h"
14
#include "lj_err.h"
15
#include "lj_buf.h"
16
#include "lj_str.h"
17
#include "lj_tab.h"
18
#include "lj_func.h"
19
#include "lj_udata.h"
20
#include "lj_meta.h"
21
#include "lj_state.h"
22
#include "lj_frame.h"
23
#if LJ_HASFFI
24
#include "lj_ctype.h"
25
#include "lj_cdata.h"
26
#endif
27
#include "lj_trace.h"
28
#include "lj_dispatch.h"
29
#include "lj_vm.h"
30
#include "lj_vmevent.h"
31

32
#define GCSTEPSIZE        1024u
33
#define GCSWEEPMAX        40
34
#define GCSWEEPCOST        10
35
#define GCFINALIZECOST        100
36

37
/* Macros to set GCobj colors and flags. */
38
#define white2gray(x)                ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
39
#define gray2black(x)                ((x)->gch.marked |= LJ_GC_BLACK)
40
#define isfinalized(u)                ((u)->marked & LJ_GC_FINALIZED)
41

42
/* -- Mark phase ---------------------------------------------------------- */
43

44
/* Mark a TValue (if needed). */
45
#define gc_marktv(g, tv) \
46
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
47
               "TValue and GC type mismatch"); \
48
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
49

50
/* Mark a GCobj (if needed). */
51
#define gc_markobj(g, o) \
52
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
53

54
/* Mark a string object. */
55
#define gc_mark_str(s)                ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
56

57
/* Mark a white GCobj. */
58
static void gc_mark(global_State *g, GCobj *o)
189,743,094✔
59
{
60
  int gct = o->gch.gct;
189,748,878✔
61
  lj_assertG(iswhite(o), "mark of non-white object");
189,748,878✔
62
  lj_assertG(!isdead(g, o), "mark of dead object");
189,748,878✔
63
  white2gray(o);
189,748,878✔
64
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
189,748,878✔
65
    GCtab *mt = tabref(gco2ud(o)->metatable);
1,055,388✔
66
    gray2black(o);  /* Userdata are never gray. */
1,055,388✔
67
    if (mt) gc_markobj(g, mt);
1,055,388✔
68
    gc_markobj(g, tabref(gco2ud(o)->env));
1,055,388✔
69
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
188,693,490✔
70
    GCupval *uv = gco2uv(o);
1,677,810✔
71
    gc_marktv(g, uvval(uv));
1,677,810✔
72
    if (uv->closed)
1,677,810✔
73
      gray2black(o);  /* Closed upvalues are never gray. */
113,261✔
74
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
187,015,680✔
75
    lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
7,419,764✔
76
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
77
               "bad GC type %d", gct);
78
    setgcrefr(o->gch.gclist, g->gc.gray);
7,419,764✔
79
    setgcref(g->gc.gray, o);
7,419,764✔
80
  }
81
}
189,743,094✔
82

83
/* Mark GC roots. */
84
static void gc_mark_gcroot(global_State *g)
12,894✔
85
{
86
  ptrdiff_t i;
12,894✔
87
  for (i = 0; i < GCROOT_MAX; i++)
502,866✔
88
    if (gcref(g->gcroot[i]) != NULL)
489,972✔
89
      gc_markobj(g, gcref(g->gcroot[i]));
333,378✔
90
}
12,894✔
91

92
/* Start a GC cycle and mark the root set. */
93
static void gc_mark_start(global_State *g)
6,460✔
94
{
95
  setgcrefnull(g->gc.gray);
6,460✔
96
  setgcrefnull(g->gc.grayagain);
6,460✔
97
  setgcrefnull(g->gc.weak);
6,460✔
98
  gc_markobj(g, mainthread(g));
6,460✔
99
  gc_markobj(g, tabref(mainthread(g)->env));
6,460✔
100
  gc_marktv(g, &g->registrytv);
6,460✔
101
  gc_mark_gcroot(g);
6,460✔
102
  g->gc.state = GCSpropagate;
6,460✔
103
}
6,460✔
104

105
/* Mark open upvalues. */
106
static void gc_mark_uv(global_State *g)
6,434✔
107
{
108
  GCupval *uv;
6,434✔
109
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
1,570,945✔
110
    lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
1,564,511✔
111
               "broken upvalue chain");
112
    if (isgray(obj2gco(uv)))
1,564,511✔
113
      gc_marktv(g, uvval(uv));
1,564,511✔
114
  }
115
}
6,434✔
116

117
/* Mark userdata in mmudata list. */
118
static void gc_mark_mmudata(global_State *g)
6,434✔
119
{
120
  GCobj *root = gcref(g->gc.mmudata);
6,434✔
121
  GCobj *u = root;
6,434✔
122
  if (u) {
6,434✔
123
    do {
1,002,844✔
124
      u = gcnext(u);
1,002,844✔
125
      makewhite(g, u);  /* Could be from previous GC. */
1,002,844✔
126
      gc_mark(g, u);
1,002,844✔
127
    } while (u != root);
1,002,844✔
128
  }
129
}
6,434✔
130

131
/* Separate userdata objects to be finalized to mmudata list. */
132
size_t lj_gc_separateudata(global_State *g, int all)
6,932✔
133
{
134
  size_t m = 0;
6,932✔
135
  GCRef *p = &mainthread(g)->nextgc;
6,932✔
136
  GCobj *o;
6,932✔
137
  while ((o = gcref(*p)) != NULL) {
2,070,092✔
138
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
2,063,160✔
139
      p = &o->gch.nextgc;  /* Nothing to do. */
1,056,959✔
140
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
1,006,201✔
141
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
2,256✔
142
      p = &o->gch.nextgc;
2,256✔
143
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
144
      m += sizeudata(gco2ud(o));
1,003,945✔
145
      markfinalized(o);
1,003,945✔
146
      *p = o->gch.nextgc;
1,003,945✔
147
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
1,003,945✔
148
        GCobj *root = gcref(g->gc.mmudata);
997,742✔
149
        setgcrefr(o->gch.nextgc, root->gch.nextgc);
997,742✔
150
        setgcref(root->gch.nextgc, o);
997,742✔
151
        setgcref(g->gc.mmudata, o);
997,742✔
152
      } else {  /* Create circular list. */
153
        setgcref(o->gch.nextgc, o);
6,203✔
154
        setgcref(g->gc.mmudata, o);
6,203✔
155
      }
156
    }
157
  }
158
  return m;
6,932✔
159
}
160

161
/* -- Propagation phase --------------------------------------------------- */
162

163
/* Traverse a table. */
164
static int gc_traverse_tab(global_State *g, GCtab *t)
2,824,442✔
165
{
166
  int weak = 0;
2,824,442✔
167
  cTValue *mode;
2,824,442✔
168
  GCtab *mt = tabref(t->metatable);
2,824,442✔
169
  if (mt)
2,824,442✔
170
    gc_markobj(g, mt);
30,553✔
171
  mode = lj_meta_fastg(g, mt, MM_mode);
2,824,442✔
172
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
18,485✔
173
    const char *modestr = strVdata(mode);
18,408✔
174
    int c;
18,408✔
175
    while ((c = *modestr++)) {
49,760✔
176
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
31,352✔
177
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
12,966✔
178
    }
179
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
18,408✔
180
#if LJ_HASFFI
181
      CTState *cts = ctype_ctsG(g);
18,408✔
182
      if (cts && cts->finalizer == t) {
18,408✔
183
        weak = (int)(~0u & ~LJ_GC_WEAKVAL);
184
      } else
185
#endif
186
      {
187
        t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
12,968✔
188
        setgcrefr(t->gclist, g->gc.weak);
12,968✔
189
        setgcref(g->gc.weak, obj2gco(t));
12,968✔
190
      }
191
    }
192
  }
193
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
25,036✔
194
    return 1;
195
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
2,811,498✔
196
    MSize i, asize = t->asize;
2,811,476✔
197
    for (i = 0; i < asize; i++)
680,021,114✔
198
      gc_marktv(g, arrayslot(t, i));
677,209,638✔
199
  }
200
  if (t->hmask > 0) {  /* Mark hash part. */
2,811,498✔
201
    Node *node = noderef(t->node);
1,712,086✔
202
    MSize i, hmask = t->hmask;
1,712,086✔
203
    for (i = 0; i <= hmask; i++) {
612,744,266✔
204
      Node *n = &node[i];
611,032,180✔
205
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
611,032,180✔
206
        lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
446,658,118✔
207
        if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
446,658,118✔
208
        if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
611,032,180✔
209
      }
210
    }
211
  }
212
  return weak;
213
}
214

215
/* Traverse a function. */
216
static void gc_traverse_func(global_State *g, GCfunc *fn)
3,421,641✔
217
{
218
  gc_markobj(g, tabref(fn->c.env));
3,421,641✔
219
  if (isluafunc(fn)) {
3,421,641✔
220
    uint32_t i;
2,113,851✔
221
    lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
2,113,851✔
222
               "function upvalues out of range");
223
    gc_markobj(g, funcproto(fn));
2,113,851✔
224
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
3,954,792✔
225
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
1,840,941✔
226
  } else {
227
    uint32_t i;
228
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
1,436,789✔
229
      gc_marktv(g, &fn->c.upvalue[i]);
128,999✔
230
  }
231
}
3,421,641✔
232

233
#if LJ_HASJIT
234
/* Mark a trace. */
235
static void gc_marktrace(global_State *g, TraceNo traceno)
119,417✔
236
{
237
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
119,417✔
238
  lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
119,417✔
239
  if (iswhite(o)) {
119,417✔
240
    white2gray(o);
66,037✔
241
    setgcrefr(o->gch.gclist, g->gc.gray);
66,037✔
242
    setgcref(g->gc.gray, o);
66,037✔
243
  }
244
}
245

246
/* Traverse a trace. */
247
static void gc_traverse_trace(global_State *g, GCtrace *T)
72,479✔
248
{
249
  IRRef ref;
72,479✔
250
  if (T->traceno == 0) return;
72,479✔
251
  for (ref = T->nk; ref < REF_TRUE; ref++) {
810,852✔
252
    IRIns *ir = &T->ir[ref];
744,846✔
253
    if (ir->o == IR_KGC)
744,846✔
254
      gc_markobj(g, ir_kgc(ir));
260,772✔
255
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
744,846✔
256
      ref++;
395,791✔
257
  }
258
  if (T->link) gc_marktrace(g, T->link);
66,006✔
259
  if (T->nextroot) gc_marktrace(g, T->nextroot);
66,006✔
260
  if (T->nextside) gc_marktrace(g, T->nextside);
66,006✔
261
  gc_markobj(g, gcref(T->startpt));
66,006✔
262
}
263

264
/* The current trace is a GC root while not anchored in the prototype (yet). */
265
#define gc_traverse_curtrace(g)        gc_traverse_trace(g, &G2J(g)->cur)
266
#else
267
#define gc_traverse_curtrace(g)        UNUSED(g)
268
#endif
269

270
/* Traverse a prototype. */
271
static void gc_traverse_proto(global_State *g, GCproto *pt)
393,816✔
272
{
273
  ptrdiff_t i;
393,816✔
274
  gc_mark_str(proto_chunkname(pt));
393,816✔
275
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
2,189,568✔
276
    gc_markobj(g, proto_kgc(pt, i));
1,795,752✔
277
#if LJ_HASJIT
278
  if (pt->trace) gc_marktrace(g, pt->trace);
393,816✔
279
#endif
280
}
393,816✔
281

282
/* Traverse the frame structure of a stack. */
283
static MSize gc_traverse_frames(global_State *g, lua_State *th)
284
{
285
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
286
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
287
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
288
    GCfunc *fn = frame_func(frame);
289
    TValue *ftop = frame;
290
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
291
    if (ftop > top) top = ftop;
292
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
293
  }
294
  top++;  /* Correct bias of -1 (frame == base-1). */
295
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
296
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
297
}
298

299
/* Traverse a thread object. */
300
static void gc_traverse_thread(global_State *g, lua_State *th)
1,570,182✔
301
{
302
  TValue *o, *top = th->top;
1,570,182✔
303
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
11,403,982✔
304
    gc_marktv(g, o);
9,833,800✔
305
  if (g->gc.state == GCSatomic) {
1,570,182✔
306
    top = tvref(th->stack) + th->stacksize;
784,626✔
307
    for (; o < top; o++)  /* Clear unmarked slots. */
31,567,600✔
308
      setnilV(o);
30,782,974✔
309
  }
310
  gc_markobj(g, tabref(th->env));
1,570,182✔
311
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
1,570,182✔
312
}
1,570,182✔
313

314
/* Propagate one gray object. Traverse it and turn it black. */
315
static size_t propagatemark(global_State *g)
8,276,126✔
316
{
317
  GCobj *o = gcref(g->gc.gray);
8,276,126✔
318
  int gct = o->gch.gct;
8,276,126✔
319
  lj_assertG(isgray(o), "propagation of non-gray object");
8,276,126✔
320
  gray2black(o);
8,276,126✔
321
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
8,276,126✔
322
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
8,276,126✔
323
    GCtab *t = gco2tab(o);
2,824,442✔
324
    if (gc_traverse_tab(g, t) > 0)
2,824,442✔
325
      black2gray(o);  /* Keep weak tables gray. */
12,968✔
326
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
2,824,442✔
327
                           (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
2,824,442✔
328
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
5,451,684✔
329
    GCfunc *fn = gco2func(o);
3,421,641✔
330
    gc_traverse_func(g, fn);
3,421,641✔
331
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
3,421,641✔
332
                           sizeCfunc((MSize)fn->c.nupvalues);
1,307,790✔
333
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
2,030,043✔
334
    GCproto *pt = gco2pt(o);
393,816✔
335
    gc_traverse_proto(g, pt);
393,816✔
336
    return pt->sizept;
393,816✔
337
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
1,636,227✔
338
    lua_State *th = gco2th(o);
1,570,182✔
339
    setgcrefr(th->gclist, g->gc.grayagain);
1,570,182✔
340
    setgcref(g->gc.grayagain, o);
1,570,182✔
341
    black2gray(o);  /* Threads are never black. */
1,570,182✔
342
    gc_traverse_thread(g, th);
1,570,182✔
343
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
1,570,182✔
344
  } else {
345
#if LJ_HASJIT
346
    GCtrace *T = gco2trace(o);
66,045✔
347
    gc_traverse_trace(g, T);
66,045✔
348
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
66,045✔
349
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
66,045✔
350
#else
351
    lj_assertG(0, "bad GC type %d", gct);
352
    return 0;
353
#endif
354
  }
355
}
356

357
/* Propagate all gray objects. */
358
static size_t gc_propagate_gray(global_State *g)
25,736✔
359
{
360
  size_t m = 0;
12,868✔
361
  while (gcref(g->gc.gray) != NULL)
2,943,942✔
362
    m += propagatemark(g);
2,918,206✔
363
  return m;
25,736✔
364
}
365

366
/* -- Sweep phase --------------------------------------------------------- */
367

368
/* Type of GC free functions. */
369
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
370

371
/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
372
static const GCFreeFunc gc_freefunc[] = {
373
  (GCFreeFunc)lj_str_free,
374
  (GCFreeFunc)lj_func_freeuv,
375
  (GCFreeFunc)lj_state_free,
376
  (GCFreeFunc)lj_func_freeproto,
377
  (GCFreeFunc)lj_func_free,
378
#if LJ_HASJIT
379
  (GCFreeFunc)lj_trace_free,
380
#else
381
  (GCFreeFunc)0,
382
#endif
383
#if LJ_HASFFI
384
  (GCFreeFunc)lj_cdata_free,
385
#else
386
  (GCFreeFunc)0,
387
#endif
388
  (GCFreeFunc)lj_tab_free,
389
  (GCFreeFunc)lj_udata_free
390
};
391

392
/* Full sweep of a GC list. */
393
#define gc_fullsweep(g, p)        gc_sweep(g, (p), ~(uint32_t)0)
394

395
/* Partial sweep of a GC list. */
396
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
8,163,895✔
397
{
398
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
399
  int ow = otherwhite(g);
8,163,895✔
400
  GCobj *o;
8,163,895✔
401
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
339,210,618✔
402
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
331,046,723✔
403
      gc_fullsweep(g, &gco2th(o)->openupval);
1,478,026✔
404
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
331,046,723✔
405
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
191,021,455✔
406
                 "sweep of undead object");
407
      makewhite(g, o);  /* Value is alive, change to the current white. */
191,021,455✔
408
      p = &o->gch.nextgc;
191,021,455✔
409
    } else {  /* Otherwise value is dead, free it. */
410
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
140,025,268✔
411
                 "sweep of unlive object");
412
      setgcrefr(*p, o->gch.nextgc);
140,025,268✔
413
      if (o == gcref(g->gc.root))
140,025,268✔
UNCOV
414
        setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
×
415
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
140,025,268✔
416
    }
417
  }
418
  return p;
8,163,895✔
419
}
420

421
/* Full sweep of a string chain. */
422
static GCRef *gc_sweep_str_chain(global_State *g, GCRef *p)
8,644,780✔
423
{
424
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
425
  int ow = otherwhite(g);
8,644,780✔
426
  GCobj *o;
8,644,780✔
427
  while ((o = gcref(*p)) != NULL) {
13,640,254✔
428
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
4,995,474✔
429
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
4,526,821✔
430
                 "sweep of undead string");
431
      makewhite(g, o);  /* Value is alive, change to the current white. */
4,526,821✔
432
#if LUAJIT_SMART_STRINGS
433
      if (strsmart(&o->str)) {
4,526,821✔
434
        /* must match lj_str_new */
435
        bloomset(g->strbloom.next[0], o->str.hash >> (sizeof(o->str.hash)*8-6));
5,379✔
436
        bloomset(g->strbloom.next[1], o->str.strflags);
5,379✔
437
      }
438
#endif
439
      p = &o->gch.nextgc;
4,526,821✔
440
    } else {  /* Otherwise value is dead, free it. */
441
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
468,653✔
442
                 "sweep of unlive string");
443
      setgcrefr(*p, o->gch.nextgc);
468,653✔
444
      lj_str_free(g, &o->str);
468,653✔
445
    }
446
  }
447
  return p;
8,644,780✔
448
}
449

450
/* Check whether we can clear a key or a value slot from a table. */
451
static int gc_mayclear(cTValue *o, int val)
452
{
453
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
454
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
455
      gc_mark_str(strV(o));  /* And need to be marked. */
456
      return 0;
457
    }
458
    if (iswhite(gcV(o)))
459
      return 1;  /* Object is about to be collected. */
460
    if (tvisudata(o) && val && isfinalized(udataV(o)))
461
      return 1;  /* Finalized userdata is dropped only from values. */
462
  }
463
  return 0;  /* Cannot clear. */
464
}
465

466
/* Clear collected entries from weak tables. */
467
static void gc_clearweak(global_State *g, GCobj *o)
468
{
469
  UNUSED(g);
470
  while (o) {
471
    GCtab *t = gco2tab(o);
472
    lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
473
    if ((t->marked & LJ_GC_WEAKVAL)) {
474
      MSize i, asize = t->asize;
475
      for (i = 0; i < asize; i++) {
476
        /* Clear array slot when value is about to be collected. */
477
        TValue *tv = arrayslot(t, i);
478
        if (gc_mayclear(tv, 1))
479
          setnilV(tv);
480
      }
481
    }
482
    if (t->hmask > 0) {
483
      Node *node = noderef(t->node);
484
      MSize i, hmask = t->hmask;
485
      for (i = 0; i <= hmask; i++) {
486
        Node *n = &node[i];
487
        /* Clear hash slot when key or value is about to be collected. */
488
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
489
                                  gc_mayclear(&n->val, 1)))
490
          setnilV(&n->val);
491
      }
492
    }
493
    o = gcref(t->gclist);
494
  }
495
}
496

497
/* Call a userdata or cdata finalizer. */
498
static void gc_call_finalizer(global_State *g, lua_State *L,
499
                              cTValue *mo, GCobj *o)
500
{
501
  /* Save and restore lots of state around the __gc callback. */
502
  uint8_t oldh = hook_save(g);
503
  GCSize oldt = g->gc.threshold;
504
  int errcode;
505
  TValue *top;
506
  lj_trace_abort(g);
507
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
508
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
509
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
510
  top = L->top;
511
  copyTV(L, top++, mo);
512
  if (LJ_FR2) setnilV(top++);
513
  setgcV(L, top, o, ~o->gch.gct);
514
  L->top = top+1;
515
  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
516
  hook_restore(g, oldh);
517
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
518
  g->gc.threshold = oldt;  /* Restore GC threshold. */
519
  if (errcode) {
520
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
521
    lj_vmevent_send(L, ERRFIN,
522
      copyTV(L, L->top++, restorestack(L, errobj));
523
    );
524
    L->top--;
525
  }
526
}
527

528
/* Finalize one userdata or cdata object from the mmudata list. */
529
static void gc_finalize(lua_State *L)
1,011,535✔
530
{
531
  global_State *g = G(L);
1,011,535✔
532
  GCobj *o = gcnext(gcref(g->gc.mmudata));
1,011,535✔
533
  cTValue *mo;
1,011,535✔
534
  lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
1,011,535✔
535
  /* Unchain from list of userdata to be finalized. */
536
  if (o == gcref(g->gc.mmudata))
1,011,535✔
537
    setgcrefnull(g->gc.mmudata);
6,236✔
538
  else
539
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
1,005,299✔
540
#if LJ_HASFFI
541
  if (o->gch.gct == ~LJ_TCDATA) {
1,011,535✔
542
    TValue tmp, *tv;
7,590✔
543
    /* Add cdata back to the GC list and make it white. */
544
    setgcrefr(o->gch.nextgc, g->gc.root);
7,590✔
545
    setgcref(g->gc.root, o);
7,590✔
546
    makewhite(g, o);
7,590✔
547
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
7,590✔
548
    /* Resolve finalizer. */
549
    setcdataV(L, &tmp, gco2cd(o));
7,590✔
550
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
7,590✔
551
    if (!tvisnil(tv)) {
7,590✔
552
      g->gc.nocdatafin = 0;
7,590✔
553
      copyTV(L, &tmp, tv);
7,590✔
554
      setnilV(tv);  /* Clear entry in finalizer table. */
7,590✔
555
      gc_call_finalizer(g, L, &tmp, o);
7,590✔
556
    }
557
    return;
7,590✔
558
  }
559
#endif
560
  /* Add userdata back to the main userdata list and make it white. */
561
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
1,003,945✔
562
  setgcref(mainthread(g)->nextgc, o);
1,003,945✔
563
  makewhite(g, o);
1,003,945✔
564
  /* Resolve the __gc metamethod. */
565
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
1,003,945✔
566
  if (mo)
1,003,945✔
567
    gc_call_finalizer(g, L, mo, o);
1,003,945✔
568
}
569

570
/* Finalize all userdata objects from mmudata list. */
571
void lj_gc_finalize_udata(lua_State *L)
254✔
572
{
573
  while (gcref(G(L)->gc.mmudata) != NULL)
1,534✔
574
    gc_finalize(L);
1,280✔
575
}
254✔
576

577
#if LJ_HASFFI
578
/* Finalize all cdata objects from finalizer table. */
579
void lj_gc_finalize_cdata(lua_State *L)
254✔
580
{
581
  global_State *g = G(L);
254✔
582
  CTState *cts = ctype_ctsG(g);
254✔
583
  if (cts) {
254✔
584
    GCtab *t = cts->finalizer;
119✔
585
    Node *node = noderef(t->node);
119✔
586
    ptrdiff_t i;
119✔
587
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
119✔
588
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
67,109,987✔
589
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
67,109,868✔
590
        GCobj *o = gcV(&node[i].key);
67,109,284✔
591
        TValue tmp;
67,109,284✔
592
        makewhite(g, o);
67,109,284✔
593
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
67,109,284✔
594
        copyTV(L, &tmp, &node[i].val);
67,109,284✔
595
        setnilV(&node[i].val);
67,109,284✔
596
        gc_call_finalizer(g, L, &tmp, o);
67,109,284✔
597
      }
598
  }
599
}
254✔
600
#endif
601

602
/* Free all remaining GC objects. */
603
void lj_gc_freeall(global_State *g)
245✔
604
{
605
  MSize i, strmask;
245✔
606
  /* Free everything, except super-fixed objects (the main thread). */
607
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
245✔
608
  gc_fullsweep(g, &g->gc.root);
245✔
609
  strmask = g->strmask;
245✔
610
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
175,861✔
611
    gc_fullsweep(g, &g->strhash[i]);
175,616✔
612
}
245✔
613

614
/* -- Collector ----------------------------------------------------------- */
615

616
/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
617
static void atomic(global_State *g, lua_State *L)
6,434✔
618
{
619
  size_t udsize;
6,434✔
620

621
  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
6,434✔
622
  gc_propagate_gray(g);  /* Propagate any left-overs. */
6,434✔
623

624
  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
6,434✔
625
  setgcrefnull(g->gc.weak);
6,434✔
626
  lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
6,434✔
627
  gc_markobj(g, L);  /* Mark running thread. */
6,434✔
628
  gc_traverse_curtrace(g);  /* Traverse current trace. */
6,434✔
629
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
6,434✔
630
  gc_propagate_gray(g);  /* Propagate all of the above. */
6,434✔
631

632
  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
6,434✔
633
  setgcrefnull(g->gc.grayagain);
6,434✔
634
  gc_propagate_gray(g);  /* Propagate it. */
6,434✔
635

636
  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
6,434✔
637
  gc_mark_mmudata(g);  /* Mark them. */
6,434✔
638
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */
6,434✔
639

640
  /* All marking done, clear weak tables. */
641
  gc_clearweak(g, gcref(g->gc.weak));
6,434✔
642

643
  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
6,434✔
644

645
  /* Prepare for sweep phase. */
646
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
6,434✔
647
  g->strempty.marked = g->gc.currentwhite;
6,434✔
648
  setmref(g->gc.sweep, &g->gc.root);
6,434✔
649
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
6,434✔
650
}
6,434✔
651

652
/* GC state machine. Returns a cost estimate for each step performed. */
653
static size_t gc_onestep(lua_State *L)
21,548,497✔
654
{
655
  global_State *g = G(L);
21,548,497✔
656
  g->gc.state_count[g->gc.state]++;
21,548,497✔
657
  switch (g->gc.state) {
21,548,497✔
658
  case GCSpause:
6,460✔
659
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
6,460✔
660
    return 0;
6,460✔
661
  case GCSpropagate:
5,364,354✔
662
    if (gcref(g->gc.gray) != NULL)
5,364,354✔
663
      return propagatemark(g);  /* Propagate one gray object. */
5,357,920✔
664
    g->gc.state = GCSatomic;  /* End of mark phase. */
6,434✔
665
    return 0;
6,434✔
666
  case GCSatomic:
6,544✔
667
    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
6,544✔
668
      return LJ_MAX_MEM;
669
    atomic(g, L);
6,434✔
670
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
6,434✔
671
    g->gc.sweepstr = 0;
6,434✔
672
#if LUAJIT_SMART_STRINGS
673
    g->strbloom.next[0] = 0;
6,434✔
674
    g->strbloom.next[1] = 0;
6,434✔
675
#endif
676
    return 0;
6,434✔
677
  case GCSsweepstring: {
8,644,780✔
678
    GCSize old = g->gc.total;
8,644,780✔
679
    gc_sweep_str_chain(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
8,644,780✔
680
    if (g->gc.sweepstr > g->strmask) {
8,644,780✔
681
      g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
7,311✔
682
#if LUAJIT_SMART_STRINGS
683
      g->strbloom.cur[0] = g->strbloom.next[0];
7,311✔
684
      g->strbloom.cur[1] = g->strbloom.next[1];
7,311✔
685
#endif
686
    }
687
    lj_assertG(old >= g->gc.total, "sweep increased memory");
8,644,780✔
688
    g->gc.estimate -= old - g->gc.total;
8,644,780✔
689
    return GCSWEEPCOST;
8,644,780✔
690
    }
691
  case GCSsweep: {
6,510,008✔
692
    GCSize old = g->gc.total;
6,510,008✔
693
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
6,510,008✔
694
    lj_assertG(old >= g->gc.total, "sweep increased memory");
6,510,008✔
695
    g->gc.estimate -= old - g->gc.total;
6,510,008✔
696
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
6,510,008✔
697
      if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
7,310✔
698
        lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
92✔
699
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
7,310✔
700
        g->gc.state = GCSfinalize;
5,991✔
701
#if LJ_HASFFI
702
        g->gc.nocdatafin = 1;
5,991✔
703
#endif
704
      } else {  /* Otherwise skip this phase to help the JIT. */
705
        g->gc.state = GCSpause;  /* End of GC cycle. */
1,319✔
706
        g->gc.debt = 0;
1,319✔
707
      }
708
    }
709
    return GCSWEEPMAX*GCSWEEPCOST;
710
    }
711
  case GCSfinalize:
1,016,351✔
712
    if (gcref(g->gc.mmudata) != NULL) {
1,016,351✔
713
      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
1,010,364✔
714
        return LJ_MAX_MEM;
715
      gc_finalize(L);  /* Finalize one userdata object. */
1,010,255✔
716
      if (g->gc.estimate > GCFINALIZECOST)
1,010,255✔
717
        g->gc.estimate -= GCFINALIZECOST;
1,010,255✔
718
      return GCFINALIZECOST;
1,010,255✔
719
    }
720
#if LJ_HASFFI
721
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
5,987✔
722
#endif
723
    g->gc.state = GCSpause;  /* End of GC cycle. */
5,987✔
724
    g->gc.debt = 0;
5,987✔
725
    return 0;
5,987✔
726
  default:
727
    lj_assertG(0, "bad GC state");
728
    return 0;
729
  }
730
}
731

732
/* Perform a limited amount of incremental GC steps. */
733
int LJ_FASTCALL lj_gc_step(lua_State *L)
846,468✔
734
{
735
  global_State *g = G(L);
846,468✔
736
  GCSize lim;
846,468✔
737
  int32_t ostate = g->vmstate;
846,468✔
738
  setvmstate(g, GC);
846,468✔
739
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
846,468✔
740
  if (lim == 0)
846,468✔
UNCOV
741
    lim = LJ_MAX_MEM;
×
742
  if (g->gc.total > g->gc.threshold)
846,468✔
743
    g->gc.debt += g->gc.total - g->gc.threshold;
829,766✔
744
  do {
10,640,536✔
745
    lim -= (GCSize)gc_onestep(L);
10,640,536✔
746
    if (g->gc.state == GCSpause) {
10,640,536✔
747
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
5,531✔
748
      g->vmstate = ostate;
5,531✔
749
      return 1;  /* Finished a GC cycle. */
5,531✔
750
    }
751
  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
10,635,005✔
752
  if (g->gc.debt < GCSTEPSIZE) {
840,937✔
753
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
257,145✔
754
    g->vmstate = ostate;
257,145✔
755
    return -1;
257,145✔
756
  } else {
757
    g->gc.debt -= GCSTEPSIZE;
583,792✔
758
    g->gc.threshold = g->gc.total;
583,792✔
759
    g->vmstate = ostate;
583,792✔
760
    return 0;
583,792✔
761
  }
762
}
763

764
/* Ditto, but fix the stack top first. */
765
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
68,684✔
766
{
767
  if (curr_funcisL(L)) L->top = curr_topL(L);
68,684✔
768
  lj_gc_step(L);
68,684✔
769
}
68,684✔
770

771
#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  /* Synchronize the Lua stack with the trace before stepping the GC. */
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  while (steps-- > 0) {
    if (lj_gc_step(L) != 0)
      break;  /* Stop early when a cycle finished or work was deferred. */
  }
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif
784

785
/*
** Perform a full GC cycle. Finishes any in-flight incremental cycle
** first, then runs one complete mark & sweep before restoring the
** previous VM state.
*/
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
             "bad GC state");
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  /* Derive the threshold for the next incremental cycle from the estimate. */
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}
889✔
809

810
/* -- Write barriers ------------------------------------------------------ */
811

812
/*
** Move the GC propagation frontier forward (forward write barrier).
** Called when a black object 'o' is about to reference a white object 'v'.
** Tables are excluded here (see the assert); they use a backward barrier.
*/
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
             "bad object states for forward barrier");
  lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
             "bad GC state");
  lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}
×
826

827
/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
/* Access the mark byte of the GCupval that encloses the TValue at (x). */
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));  /* Keep the stored value reachable. */
  else
    /* Outside propagation: just recolor the upvalue to the current white. */
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}
838

839
/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  /* Link the now-closed upvalue into the main GC root list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));  /* Re-establish the invariant. */
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
                 "bad GC state");
    }
  }
}
861

862
#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  /* Outside the propagation/atomic phases the barrier is a no-op. */
  if (g->gc.state != GCSpropagate && g->gc.state != GCSatomic)
    return;
  gc_marktrace(g, traceno);
}
#endif
870

871
/* -- Allocator ----------------------------------------------------------- */
872

873
/*
** Call pluggable memory allocator to allocate or resize a fragment.
** Updates the GC accounting (total/allocated/freed) and throws a
** memory error on allocation failure. Passing osz == 0 (p == NULL)
** allocates; nsz == 0 frees and returns NULL.
*/
void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
{
  global_State *g = G(L);
  lj_assertG((osz == 0) == (p == NULL), "realloc API violation");

  setgcref(g->mem_L, obj2gco(L));  /* Remember the allocating coroutine. */
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
  lj_assertG(checkptrGC(p),
             "allocated memory address %p outside required range", p);
  g->gc.total = (g->gc.total - osz) + nsz;
  g->gc.allocated += nsz;
  g->gc.freed += osz;
  return p;
}
891

892
/*
** Allocate new GC object and link it to the root set.
** Throws a memory error on allocation failure; the returned object is
** colored with the current white.
*/
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
{
  global_State *g = G(L);
  GCobj *o;

  setgcref(g->mem_L, obj2gco(L));  /* Remember the allocating coroutine. */
  o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lj_assertG(checkptrGC(o),
             "allocated memory address %p outside required range", o);
  g->gc.total += size;
  g->gc.allocated += size;
  /* Prepend to the GC root list and color with the current white. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);
  return o;
}
911

912
/* Resize growable vector. */
913
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
176,922✔
914
{
915
  MSize sz = (*szp) << 1;
176,922✔
916
  if (sz < LJ_MIN_VECSZ)
176,922✔
917
    sz = LJ_MIN_VECSZ;
918
  if (sz > lim)
176,922✔
919
    sz = lim;
920
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
176,922✔
921
  *szp = sz;
176,922✔
922
  return p;
176,922✔
923
}
924

STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc