• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tarantool / luajit / 9593251616

20 Jun 2024 07:14AM UTC coverage: 92.646% (+0.02%) from 92.624%
9593251616

push

github

Buristan
cmake: fix warning about minimum required version

Since CMake 3.27, compatibility with versions of CMake older than
3.5 is deprecated [1]. CMake produces an annoying warning at the
configuration stage:

| CMake Deprecation Warning at src/CMakeLists.txt:7 (cmake_minimum_required):
|  Compatibility with CMake < 3.5 will be removed from a future version of
|  CMake.

We cannot bump the minimum required CMake version without bumping it in
the Tarantool build system. However, we can set a <policy_max>
(introduced in CMake 3.12, see [1]) and suppress the warning. <policy_max>
means that this CMake version is known to work properly and will not
result in any build errors right away for higher versions.

Note that the current CMake minimum required version in Tarantool is
equal to 2.8, but <policy_max> is introduced in CMake 3.12 [1].
However, according to [1] it is not a problem because if CMake is "older
than 3.12, the extra ... dots will be seen as version component
separators, resulting in the ...<max> part being ignored and preserving
the pre-3.12 behavior of basing policies on <min>".

<policy_max> is set to 3.18 because compatibility with versions of CMake
older than 2.8.12 is deprecated. Calls to
cmake_minimum_required(VERSION) that do not specify at least 2.8.12 as
their policy version (optionally via ...<max>) will produce a
deprecation warning in CMake 3.19 and above [2]. Compatibility with
2.8.12 is needed for CMP0002 [3], see commit 049e296ee114 ("test: run
LuaJIT tests via CMake").

1. https://cmake.org/cmake/help/latest/command/cmake_minimum_required.html
2. https://cmake.org/cmake/help/latest/command/cmake_minimum_required.html#policy-settings
3. https://cmake.org/cmake/help/latest/policy/CMP0002.html

5665 of 6021 branches covered (94.09%)

Branch coverage included in aggregate %.

21621 of 23431 relevant lines covered (92.28%)

2937993.42 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.0
/src/lj_gc.c
1
/*
2
** Garbage collector.
3
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
4
**
5
** Major portions taken verbatim or adapted from the Lua interpreter.
6
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
7
*/
8

9
#define lj_gc_c
10
#define LUA_CORE
11

12
#include "lj_obj.h"
13
#include "lj_gc.h"
14
#include "lj_err.h"
15
#include "lj_buf.h"
16
#include "lj_str.h"
17
#include "lj_tab.h"
18
#include "lj_func.h"
19
#include "lj_udata.h"
20
#include "lj_meta.h"
21
#include "lj_state.h"
22
#include "lj_frame.h"
23
#if LJ_HASFFI
24
#include "lj_ctype.h"
25
#include "lj_cdata.h"
26
#endif
27
#include "lj_trace.h"
28
#include "lj_dispatch.h"
29
#include "lj_vm.h"
30
#include "lj_vmevent.h"
31

32
#define GCSTEPSIZE        1024u
33
#define GCSWEEPMAX        40
34
#define GCSWEEPCOST        10
35
#define GCFINALIZECOST        100
36

37
/* Macros to set GCobj colors and flags. */
38
#define white2gray(x)                ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
39
#define gray2black(x)                ((x)->gch.marked |= LJ_GC_BLACK)
40
#define isfinalized(u)                ((u)->marked & LJ_GC_FINALIZED)
41

42
/* -- Mark phase ---------------------------------------------------------- */
43

44
/* Mark a TValue (if needed). */
45
#define gc_marktv(g, tv) \
46
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
47
               "TValue and GC type mismatch"); \
48
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
49

50
/* Mark a GCobj (if needed). */
51
#define gc_markobj(g, o) \
52
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
53

54
/* Mark a string object. */
55
#define gc_mark_str(s)                ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
56

57
/* Mark a white GCobj. */
58
static void gc_mark(global_State *g, GCobj *o)
191,478,883✔
59
{
60
  int gct = o->gch.gct;
191,485,477✔
61
  lj_assertG(iswhite(o), "mark of non-white object");
191,485,477✔
62
  lj_assertG(!isdead(g, o), "mark of dead object");
191,485,477✔
63
  white2gray(o);
191,485,477✔
64
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
191,485,477✔
65
    GCtab *mt = tabref(gco2ud(o)->metatable);
1,054,688✔
66
    gray2black(o);  /* Userdata are never gray. */
1,054,688✔
67
    if (mt) gc_markobj(g, mt);
1,054,688✔
68
    gc_markobj(g, tabref(gco2ud(o)->env));
1,054,688✔
69
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
190,430,789✔
70
    GCupval *uv = gco2uv(o);
1,700,344✔
71
    gc_marktv(g, uvval(uv));
1,700,344✔
72
    if (uv->closed)
1,700,344✔
73
      gray2black(o);  /* Closed upvalues are never gray. */
134,560✔
74
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
188,730,445✔
75
    lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
8,256,354✔
76
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
77
               "bad GC type %d", gct);
78
    setgcrefr(o->gch.gclist, g->gc.gray);
8,256,354✔
79
    setgcref(g->gc.gray, o);
8,256,354✔
80
  }
81
}
191,478,883✔
82

83
/* Mark GC roots. */
84
static void gc_mark_gcroot(global_State *g)
13,315✔
85
{
86
  ptrdiff_t i;
13,315✔
87
  for (i = 0; i < GCROOT_MAX; i++)
519,285✔
88
    if (gcref(g->gcroot[i]) != NULL)
505,970✔
89
      gc_markobj(g, gcref(g->gcroot[i]));
344,230✔
90
}
13,315✔
91

92
/* Start a GC cycle and mark the root set. */
93
static void gc_mark_start(global_State *g)
6,677✔
94
{
95
  setgcrefnull(g->gc.gray);
6,677✔
96
  setgcrefnull(g->gc.grayagain);
6,677✔
97
  setgcrefnull(g->gc.weak);
6,677✔
98
  gc_markobj(g, mainthread(g));
6,677✔
99
  gc_markobj(g, tabref(mainthread(g)->env));
6,677✔
100
  gc_marktv(g, &g->registrytv);
6,677✔
101
  gc_mark_gcroot(g);
6,677✔
102
  g->gc.state = GCSpropagate;
6,677✔
103
}
6,677✔
104

105
/* Mark open upvalues. */
106
static void gc_mark_uv(global_State *g)
6,638✔
107
{
108
  GCupval *uv;
6,638✔
109
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
1,572,366✔
110
    lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
1,565,728✔
111
               "broken upvalue chain");
112
    if (isgray(obj2gco(uv)))
1,565,728✔
113
      gc_marktv(g, uvval(uv));
1,565,728✔
114
  }
115
}
6,638✔
116

117
/* Mark userdata in mmudata list. */
118
static void gc_mark_mmudata(global_State *g)
6,638✔
119
{
120
  GCobj *root = gcref(g->gc.mmudata);
6,638✔
121
  GCobj *u = root;
6,638✔
122
  if (u) {
6,638✔
123
    do {
1,002,757✔
124
      u = gcnext(u);
1,002,757✔
125
      makewhite(g, u);  /* Could be from previous GC. */
1,002,757✔
126
      gc_mark(g, u);
1,002,757✔
127
    } while (u != root);
1,002,757✔
128
  }
129
}
6,638✔
130

131
/* Separate userdata objects to be finalized to mmudata list. */
132
size_t lj_gc_separateudata(global_State *g, int all)
7,290✔
133
{
134
  size_t m = 0;
7,290✔
135
  GCRef *p = &mainthread(g)->nextgc;
7,290✔
136
  GCobj *o;
7,290✔
137
  while ((o = gcref(*p)) != NULL) {
2,070,737✔
138
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
2,063,447✔
139
      p = &o->gch.nextgc;  /* Nothing to do. */
1,056,789✔
140
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
1,006,658✔
141
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
2,335✔
142
      p = &o->gch.nextgc;
2,335✔
143
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
144
      m += sizeudata(gco2ud(o));
1,004,323✔
145
      markfinalized(o);
1,004,323✔
146
      *p = o->gch.nextgc;
1,004,323✔
147
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
1,004,323✔
148
        GCobj *root = gcref(g->gc.mmudata);
998,044✔
149
        setgcrefr(o->gch.nextgc, root->gch.nextgc);
998,044✔
150
        setgcref(root->gch.nextgc, o);
998,044✔
151
        setgcref(g->gc.mmudata, o);
998,044✔
152
      } else {  /* Create circular list. */
153
        setgcref(o->gch.nextgc, o);
6,279✔
154
        setgcref(g->gc.mmudata, o);
6,279✔
155
      }
156
    }
157
  }
158
  return m;
7,290✔
159
}
160

161
/* -- Propagation phase --------------------------------------------------- */
162

163
/* Traverse a table. */
164
static int gc_traverse_tab(global_State *g, GCtab *t)
3,421,931✔
165
{
166
  int weak = 0;
3,421,931✔
167
  cTValue *mode;
3,421,931✔
168
  GCtab *mt = tabref(t->metatable);
3,421,931✔
169
  if (mt)
3,421,931✔
170
    gc_markobj(g, mt);
33,221✔
171
  mode = lj_meta_fastg(g, mt, MM_mode);
3,421,931✔
172
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
19,158✔
173
    const char *modestr = strVdata(mode);
19,031✔
174
    int c;
19,031✔
175
    while ((c = *modestr++)) {
51,416✔
176
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
32,385✔
177
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
13,376✔
178
    }
179
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
19,031✔
180
#if LJ_HASFFI
181
      CTState *cts = ctype_ctsG(g);
19,031✔
182
      if (cts && cts->finalizer == t) {
19,031✔
183
        weak = (int)(~0u & ~LJ_GC_WEAKVAL);
184
      } else
185
#endif
186
      {
187
        t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
13,378✔
188
        setgcrefr(t->gclist, g->gc.weak);
13,378✔
189
        setgcref(g->gc.weak, obj2gco(t));
13,378✔
190
      }
191
    }
192
  }
193
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
27,441✔
194
    return 1;
195
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
3,408,577✔
196
    MSize i, asize = t->asize;
3,408,555✔
197
    for (i = 0; i < asize; i++)
683,730,408✔
198
      gc_marktv(g, arrayslot(t, i));
680,321,853✔
199
  }
200
  if (t->hmask > 0) {  /* Mark hash part. */
3,408,577✔
201
    Node *node = noderef(t->node);
1,731,744✔
202
    MSize i, hmask = t->hmask;
1,731,744✔
203
    for (i = 0; i <= hmask; i++) {
754,967,594✔
204
      Node *n = &node[i];
753,235,850✔
205
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
753,235,850✔
206
        lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
447,265,210✔
207
        if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
447,265,210✔
208
        if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
753,235,850✔
209
      }
210
    }
211
  }
212
  return weak;
213
}
214

215
/* Traverse a function. */
216
static void gc_traverse_func(global_State *g, GCfunc *fn)
3,545,584✔
217
{
218
  gc_markobj(g, tabref(fn->c.env));
3,545,584✔
219
  if (isluafunc(fn)) {
3,545,584✔
220
    uint32_t i;
2,192,553✔
221
    lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
2,192,553✔
222
               "function upvalues out of range");
223
    gc_markobj(g, funcproto(fn));
2,192,553✔
224
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
4,126,115✔
225
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
1,933,562✔
226
  } else {
227
    uint32_t i;
228
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
1,486,219✔
229
      gc_marktv(g, &fn->c.upvalue[i]);
133,188✔
230
  }
231
}
3,545,584✔
232

233
#if LJ_HASJIT
234
/* Mark a trace. */
235
static void gc_marktrace(global_State *g, TraceNo traceno)
156,110✔
236
{
237
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
156,110✔
238
  lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
156,110✔
239
  if (iswhite(o)) {
156,110✔
240
    white2gray(o);
85,523✔
241
    setgcrefr(o->gch.gclist, g->gc.gray);
85,523✔
242
    setgcref(g->gc.gray, o);
85,523✔
243
  }
244
}
245

246
/* Traverse a trace. */
247
static void gc_traverse_trace(global_State *g, GCtrace *T)
92,181✔
248
{
249
  IRRef ref;
92,181✔
250
  if (T->traceno == 0) return;
92,181✔
251
  for (ref = T->nk; ref < REF_TRUE; ref++) {
1,100,124✔
252
    IRIns *ir = &T->ir[ref];
1,014,623✔
253
    if (ir->o == IR_KGC)
1,014,623✔
254
      gc_markobj(g, ir_kgc(ir));
351,063✔
255
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
1,014,623✔
256
      ref++;
547,990✔
257
  }
258
  if (T->link) gc_marktrace(g, T->link);
85,501✔
259
  if (T->nextroot) gc_marktrace(g, T->nextroot);
85,501✔
260
  if (T->nextside) gc_marktrace(g, T->nextside);
85,501✔
261
  gc_markobj(g, gcref(T->startpt));
85,501✔
262
}
263

264
/* The current trace is a GC root while not anchored in the prototype (yet). */
265
#define gc_traverse_curtrace(g)        gc_traverse_trace(g, &G2J(g)->cur)
266
#else
267
#define gc_traverse_curtrace(g)        UNUSED(g)
268
#endif
269

270
/* Traverse a prototype. */
271
static void gc_traverse_proto(global_State *g, GCproto *pt)
506,499✔
272
{
273
  ptrdiff_t i;
506,499✔
274
  gc_mark_str(proto_chunkname(pt));
506,499✔
275
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
2,755,019✔
276
    gc_markobj(g, proto_kgc(pt, i));
2,248,520✔
277
#if LJ_HASJIT
278
  if (pt->trace) gc_marktrace(g, pt->trace);
506,499✔
279
#endif
280
}
506,499✔
281

282
/* Traverse the frame structure of a stack. */
283
static MSize gc_traverse_frames(global_State *g, lua_State *th)
284
{
285
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
286
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
287
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
288
    GCfunc *fn = frame_func(frame);
289
    TValue *ftop = frame;
290
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
291
    if (ftop > top) top = ftop;
292
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
293
  }
294
  top++;  /* Correct bias of -1 (frame == base-1). */
295
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
296
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
297
}
298

299
/* Traverse a thread object. */
300
static void gc_traverse_thread(global_State *g, lua_State *th)
1,570,579✔
301
{
302
  TValue *o, *top = th->top;
1,570,579✔
303
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
11,550,241✔
304
    gc_marktv(g, o);
9,979,662✔
305
  if (g->gc.state == GCSatomic) {
1,570,579✔
306
    top = tvref(th->stack) + th->stacksize;
784,823✔
307
    for (; o < top; o++)  /* Clear unmarked slots. */
31,624,158✔
308
      setnilV(o);
30,839,335✔
309
  }
310
  gc_markobj(g, tabref(th->env));
1,570,579✔
311
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
1,570,579✔
312
}
1,570,579✔
313

314
/* Propagate one gray object. Traverse it and turn it black. */
315
static size_t propagatemark(global_State *g)
9,130,136✔
316
{
317
  GCobj *o = gcref(g->gc.gray);
9,130,136✔
318
  int gct = o->gch.gct;
9,130,136✔
319
  lj_assertG(isgray(o), "propagation of non-gray object");
9,130,136✔
320
  gray2black(o);
9,130,136✔
321
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
9,130,136✔
322
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
9,130,136✔
323
    GCtab *t = gco2tab(o);
3,421,931✔
324
    if (gc_traverse_tab(g, t) > 0)
3,421,931✔
325
      black2gray(o);  /* Keep weak tables gray. */
13,378✔
326
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
3,421,931✔
327
                           (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
3,421,931✔
328
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
5,708,205✔
329
    GCfunc *fn = gco2func(o);
3,545,584✔
330
    gc_traverse_func(g, fn);
3,545,584✔
331
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
3,545,584✔
332
                           sizeCfunc((MSize)fn->c.nupvalues);
1,353,031✔
333
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
2,162,621✔
334
    GCproto *pt = gco2pt(o);
506,499✔
335
    gc_traverse_proto(g, pt);
506,499✔
336
    return pt->sizept;
506,499✔
337
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
1,656,122✔
338
    lua_State *th = gco2th(o);
1,570,579✔
339
    setgcrefr(th->gclist, g->gc.grayagain);
1,570,579✔
340
    setgcref(g->gc.grayagain, o);
1,570,579✔
341
    black2gray(o);  /* Threads are never black. */
1,570,579✔
342
    gc_traverse_thread(g, th);
1,570,579✔
343
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
1,570,579✔
344
  } else {
345
#if LJ_HASJIT
346
    GCtrace *T = gco2trace(o);
85,543✔
347
    gc_traverse_trace(g, T);
85,543✔
348
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
85,543✔
349
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
85,543✔
350
#else
351
    lj_assertG(0, "bad GC type %d", gct);
352
    return 0;
353
#endif
354
  }
355
}
356

357
/* Propagate all gray objects. */
358
static size_t gc_propagate_gray(global_State *g)
26,552✔
359
{
360
  size_t m = 0;
13,276✔
361
  while (gcref(g->gc.gray) != NULL)
2,949,678✔
362
    m += propagatemark(g);
2,923,126✔
363
  return m;
26,552✔
364
}
365

366
/* -- Sweep phase --------------------------------------------------------- */
367

368
/* Type of GC free functions. */
369
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
370

371
/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
372
static const GCFreeFunc gc_freefunc[] = {
373
  (GCFreeFunc)lj_str_free,
374
  (GCFreeFunc)lj_func_freeuv,
375
  (GCFreeFunc)lj_state_free,
376
  (GCFreeFunc)lj_func_freeproto,
377
  (GCFreeFunc)lj_func_free,
378
#if LJ_HASJIT
379
  (GCFreeFunc)lj_trace_free,
380
#else
381
  (GCFreeFunc)0,
382
#endif
383
#if LJ_HASFFI
384
  (GCFreeFunc)lj_cdata_free,
385
#else
386
  (GCFreeFunc)0,
387
#endif
388
  (GCFreeFunc)lj_tab_free,
389
  (GCFreeFunc)lj_udata_free
390
};
391

392
/* Full sweep of a GC list. */
393
#define gc_fullsweep(g, p)        gc_sweep(g, (p), ~(uint32_t)0)
394

395
/* Partial sweep of a GC list. */
396
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
8,356,022✔
397
{
398
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
399
  int ow = otherwhite(g);
8,356,022✔
400
  GCobj *o;
8,356,022✔
401
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
342,077,785✔
402
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
333,721,763✔
403
      gc_fullsweep(g, &gco2th(o)->openupval);
1,478,358✔
404
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
333,721,763✔
405
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
193,017,364✔
406
                 "sweep of undead object");
407
      makewhite(g, o);  /* Value is alive, change to the current white. */
193,017,364✔
408
      p = &o->gch.nextgc;
193,017,364✔
409
    } else {  /* Otherwise value is dead, free it. */
410
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
140,704,399✔
411
                 "sweep of unlive object");
412
      setgcrefr(*p, o->gch.nextgc);
140,704,399✔
413
      if (o == gcref(g->gc.root))
140,704,399✔
414
        setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
×
415
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
140,704,399✔
416
    }
417
  }
418
  return p;
8,356,022✔
419
}
420

421
/* Full sweep of a string chain. */
422
static GCRef *gc_sweep_str_chain(global_State *g, GCRef *p)
12,184,029✔
423
{
424
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
425
  int ow = otherwhite(g);
12,184,029✔
426
  GCobj *o;
12,184,029✔
427
  while ((o = gcref(*p)) != NULL) {
18,568,359✔
428
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
6,384,330✔
429
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
5,838,954✔
430
                 "sweep of undead string");
431
      makewhite(g, o);  /* Value is alive, change to the current white. */
5,838,954✔
432
#if LUAJIT_SMART_STRINGS
433
      if (strsmart(&o->str)) {
5,838,954✔
434
        /* must match lj_str_new */
435
        bloomset(g->strbloom.next[0], o->str.hash >> (sizeof(o->str.hash)*8-6));
10,883✔
436
        bloomset(g->strbloom.next[1], o->str.strflags);
10,883✔
437
      }
438
#endif
439
      p = &o->gch.nextgc;
5,838,954✔
440
    } else {  /* Otherwise value is dead, free it. */
441
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
545,376✔
442
                 "sweep of unlive string");
443
      setgcrefr(*p, o->gch.nextgc);
545,376✔
444
      lj_str_free(g, &o->str);
545,376✔
445
    }
446
  }
447
  return p;
12,184,029✔
448
}
449

450
/* Check whether we can clear a key or a value slot from a table. */
451
static int gc_mayclear(cTValue *o, int val)
452
{
453
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
454
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
455
      gc_mark_str(strV(o));  /* And need to be marked. */
456
      return 0;
457
    }
458
    if (iswhite(gcV(o)))
459
      return 1;  /* Object is about to be collected. */
460
    if (tvisudata(o) && val && isfinalized(udataV(o)))
461
      return 1;  /* Finalized userdata is dropped only from values. */
462
  }
463
  return 0;  /* Cannot clear. */
464
}
465

466
/* Clear collected entries from weak tables. */
467
static void gc_clearweak(global_State *g, GCobj *o)
468
{
469
  UNUSED(g);
470
  while (o) {
471
    GCtab *t = gco2tab(o);
472
    lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
473
    if ((t->marked & LJ_GC_WEAKVAL)) {
474
      MSize i, asize = t->asize;
475
      for (i = 0; i < asize; i++) {
476
        /* Clear array slot when value is about to be collected. */
477
        TValue *tv = arrayslot(t, i);
478
        if (gc_mayclear(tv, 1))
479
          setnilV(tv);
480
      }
481
    }
482
    if (t->hmask > 0) {
483
      Node *node = noderef(t->node);
484
      MSize i, hmask = t->hmask;
485
      for (i = 0; i <= hmask; i++) {
486
        Node *n = &node[i];
487
        /* Clear hash slot when key or value is about to be collected. */
488
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
489
                                  gc_mayclear(&n->val, 1)))
490
          setnilV(&n->val);
491
      }
492
    }
493
    o = gcref(t->gclist);
494
  }
495
}
496

497
/* Call a userdata or cdata finalizer. */
498
static void gc_call_finalizer(global_State *g, lua_State *L,
499
                              cTValue *mo, GCobj *o)
500
{
501
  /* Save and restore lots of state around the __gc callback. */
502
  uint8_t oldh = hook_save(g);
503
  GCSize oldt = g->gc.threshold;
504
  int errcode;
505
  TValue *top;
506
  lj_trace_abort(g);
507
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
508
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
509
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
510
  top = L->top;
511
  copyTV(L, top++, mo);
512
  if (LJ_FR2) setnilV(top++);
513
  setgcV(L, top, o, ~o->gch.gct);
514
  L->top = top+1;
515
  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
516
  hook_restore(g, oldh);
517
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
518
  g->gc.threshold = oldt;  /* Restore GC threshold. */
519
  if (errcode) {
520
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
521
    lj_vmevent_send(L, ERRFIN,
522
      copyTV(L, L->top++, restorestack(L, errobj));
523
    );
524
    L->top--;
525
  }
526
}
527

528
/* Finalize one userdata or cdata object from the mmudata list. */
529
static void gc_finalize(lua_State *L)
1,044,333✔
530
{
531
  global_State *g = G(L);
1,044,333✔
532
  GCobj *o = gcnext(gcref(g->gc.mmudata));
1,044,333✔
533
  cTValue *mo;
1,044,333✔
534
  lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
1,044,333✔
535
  /* Unchain from list of userdata to be finalized. */
536
  if (o == gcref(g->gc.mmudata))
1,044,333✔
537
    setgcrefnull(g->gc.mmudata);
6,349✔
538
  else
539
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
1,037,984✔
540
#if LJ_HASFFI
541
  if (o->gch.gct == ~LJ_TCDATA) {
1,044,333✔
542
    TValue tmp, *tv;
40,010✔
543
    /* Add cdata back to the GC list and make it white. */
544
    setgcrefr(o->gch.nextgc, g->gc.root);
40,010✔
545
    setgcref(g->gc.root, o);
40,010✔
546
    makewhite(g, o);
40,010✔
547
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
40,010✔
548
    /* Resolve finalizer. */
549
    setcdataV(L, &tmp, gco2cd(o));
40,010✔
550
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
40,010✔
551
    if (!tvisnil(tv)) {
40,010✔
552
      g->gc.nocdatafin = 0;
39,758✔
553
      copyTV(L, &tmp, tv);
39,758✔
554
      setnilV(tv);  /* Clear entry in finalizer table. */
39,758✔
555
      gc_call_finalizer(g, L, &tmp, o);
39,758✔
556
    }
557
    return;
40,010✔
558
  }
559
#endif
560
  /* Add userdata back to the main userdata list and make it white. */
561
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
1,004,323✔
562
  setgcref(mainthread(g)->nextgc, o);
1,004,323✔
563
  makewhite(g, o);
1,004,323✔
564
  /* Resolve the __gc metamethod. */
565
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
1,004,323✔
566
  if (mo)
1,004,323✔
567
    gc_call_finalizer(g, L, mo, o);
1,004,323✔
568
}
569

570
/* Finalize all userdata objects from mmudata list. */
571
void lj_gc_finalize_udata(lua_State *L)
331✔
572
{
573
  while (gcref(G(L)->gc.mmudata) != NULL)
2,152✔
574
    gc_finalize(L);
1,821✔
575
}
331✔
576

577
#if LJ_HASFFI
578
/* Finalize all cdata objects from finalizer table. */
579
void lj_gc_finalize_cdata(lua_State *L)
331✔
580
{
581
  global_State *g = G(L);
331✔
582
  CTState *cts = ctype_ctsG(g);
331✔
583
  if (cts) {
331✔
584
    GCtab *t = cts->finalizer;
183✔
585
    Node *node = noderef(t->node);
183✔
586
    ptrdiff_t i;
183✔
587
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
183✔
588
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
67,110,051✔
589
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
67,109,868✔
590
        GCobj *o = gcV(&node[i].key);
67,109,234✔
591
        TValue tmp;
67,109,234✔
592
        makewhite(g, o);
67,109,234✔
593
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
67,109,234✔
594
        copyTV(L, &tmp, &node[i].val);
67,109,234✔
595
        setnilV(&node[i].val);
67,109,234✔
596
        gc_call_finalizer(g, L, &tmp, o);
67,109,234✔
597
      }
598
  }
599
}
331✔
600
#endif
601

602
/* Free all remaining GC objects. */
603
void lj_gc_freeall(global_State *g)
322✔
604
{
605
  MSize i, strmask;
322✔
606
  /* Free everything, except super-fixed objects (the main thread). */
607
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
322✔
608
  gc_fullsweep(g, &g->gc.root);
322✔
609
  strmask = g->strmask;
322✔
610
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
310,082✔
611
    gc_fullsweep(g, &g->strhash[i]);
309,760✔
612
}
322✔
613

614
/* -- Collector ----------------------------------------------------------- */
615

616
/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
617
static void atomic(global_State *g, lua_State *L)
6,638✔
618
{
619
  size_t udsize;
6,638✔
620

621
  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
6,638✔
622
  gc_propagate_gray(g);  /* Propagate any left-overs. */
6,638✔
623

624
  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
6,638✔
625
  setgcrefnull(g->gc.weak);
6,638✔
626
  lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
6,638✔
627
  gc_markobj(g, L);  /* Mark running thread. */
6,638✔
628
  gc_traverse_curtrace(g);  /* Traverse current trace. */
6,638✔
629
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
6,638✔
630
  gc_propagate_gray(g);  /* Propagate all of the above. */
6,638✔
631

632
  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
6,638✔
633
  setgcrefnull(g->gc.grayagain);
6,638✔
634
  gc_propagate_gray(g);  /* Propagate it. */
6,638✔
635

636
  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
6,638✔
637
  gc_mark_mmudata(g);  /* Mark them. */
6,638✔
638
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */
6,638✔
639

640
  /* All marking done, clear weak tables. */
641
  gc_clearweak(g, gcref(g->gc.weak));
6,638✔
642

643
  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
6,638✔
644

645
  /* Prepare for sweep phase. */
646
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
6,638✔
647
  g->strempty.marked = g->gc.currentwhite;
6,638✔
648
  setmref(g->gc.sweep, &g->gc.root);
6,638✔
649
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
6,638✔
650
}
6,638✔
651

652
/* GC state machine. Returns a cost estimate for each step performed. */
653
static size_t gc_onestep(lua_State *L)
26,027,540✔
654
{
655
  global_State *g = G(L);
26,027,540✔
656
  g->gc.state_count[g->gc.state]++;
26,027,540✔
657
  switch (g->gc.state) {
26,027,540✔
658
  case GCSpause:
6,677✔
659
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
6,677✔
660
    return 0;
6,677✔
661
  case GCSpropagate:
6,213,648✔
662
    if (gcref(g->gc.gray) != NULL)
6,213,648✔
663
      return propagatemark(g);  /* Propagate one gray object. */
6,207,010✔
664
    g->gc.state = GCSatomic;  /* End of mark phase. */
6,638✔
665
    return 0;
6,638✔
666
  case GCSatomic:
6,789✔
667
    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
6,789✔
668
      return LJ_MAX_MEM;
669
    atomic(g, L);
6,638✔
670
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
6,638✔
671
    g->gc.sweepstr = 0;
6,638✔
672
#if LUAJIT_SMART_STRINGS
673
    g->strbloom.next[0] = 0;
6,638✔
674
    g->strbloom.next[1] = 0;
6,638✔
675
#endif
676
    return 0;
6,638✔
677
  case GCSsweepstring: {
12,184,029✔
678
    GCSize old = g->gc.total;
12,184,029✔
679
    gc_sweep_str_chain(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
12,184,029✔
680
    if (g->gc.sweepstr > g->strmask) {
12,184,029✔
681
      g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
7,568✔
682
#if LUAJIT_SMART_STRINGS
683
      g->strbloom.cur[0] = g->strbloom.next[0];
7,568✔
684
      g->strbloom.cur[1] = g->strbloom.next[1];
7,568✔
685
#endif
686
    }
687
    lj_assertG(old >= g->gc.total, "sweep increased memory");
12,184,029✔
688
    g->gc.estimate -= old - g->gc.total;
12,184,029✔
689
    return GCSWEEPCOST;
12,184,029✔
690
    }
691
  case GCSsweep: {
6,567,582✔
692
    GCSize old = g->gc.total;
6,567,582✔
693
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
6,567,582✔
694
    lj_assertG(old >= g->gc.total, "sweep increased memory");
6,567,582✔
695
    g->gc.estimate -= old - g->gc.total;
6,567,582✔
696
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
6,567,582✔
697
      if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
7,562✔
698
        lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
90✔
699
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
7,562✔
700
        g->gc.state = GCSfinalize;
6,027✔
701
#if LJ_HASFFI
702
        g->gc.nocdatafin = 1;
6,027✔
703
#endif
704
      } else {  /* Otherwise skip this phase to help the JIT. */
705
        g->gc.state = GCSpause;  /* End of GC cycle. */
1,535✔
706
        g->gc.debt = 0;
1,535✔
707
      }
708
    }
709
    return GCSWEEPMAX*GCSWEEPCOST;
710
    }
711
  case GCSfinalize:
1,048,815✔
712
    if (gcref(g->gc.mmudata) != NULL) {
1,048,815✔
713
      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
1,042,792✔
714
        return LJ_MAX_MEM;
715
      gc_finalize(L);  /* Finalize one userdata object. */
1,042,512✔
716
      if (g->gc.estimate > GCFINALIZECOST)
1,042,512✔
717
        g->gc.estimate -= GCFINALIZECOST;
1,042,512✔
718
      return GCFINALIZECOST;
1,042,512✔
719
    }
720
#if LJ_HASFFI
721
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
6,023✔
722
#endif
723
    g->gc.state = GCSpause;  /* End of GC cycle. */
6,023✔
724
    g->gc.debt = 0;
6,023✔
725
    return 0;
6,023✔
726
  default:
727
    lj_assertG(0, "bad GC state");
728
    return 0;
729
  }
730
}
731

732
/* Perform a limited amount of incremental GC steps.
** Runs gc_onestep() until either a cycle completes or the work budget
** (derived from GCSTEPSIZE scaled by gc.stepmul) is exhausted.
** Returns 1 if a full GC cycle finished, -1 if the debt dropped below
** GCSTEPSIZE (threshold pushed ahead of gc.total), 0 otherwise.
*/
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  GCSize lim;
  int32_t ostate = g->vmstate;  /* Save VM state; restored on every exit path. */
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;  /* stepmul == 0 means: run without a work budget. */
  if (g->gc.total > g->gc.threshold)
    g->gc.debt += g->gc.total - g->gc.threshold;  /* Account allocation overshoot. */
  do {
    lim -= (GCSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      /* Cycle complete: set the next threshold relative to the estimate. */
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  /* lim is unsigned; reinterpret as signed to detect budget underflow. */
  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;  /* Defer the next step. */
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;  /* Still in debt: step again ASAP. */
    g->vmstate = ostate;
    return 0;
  }
}
763

764
/* Ditto, but fix the stack top first. */
765
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
66,103✔
766
{
767
  if (curr_funcisL(L)) L->top = curr_topL(L);
66,103✔
768
  lj_gc_step(L);
66,103✔
769
}
66,103✔
770

771
#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  /* Synchronize the Lua stack with the state the trace left it in. */
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  /* Step until the budget is spent or lj_gc_step() reports != 0. */
  for (; steps > 0; steps--) {
    if (lj_gc_step(L) != 0)
      break;
  }
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif
784

785
/* Perform a full GC cycle.
** If the collector was caught in the mark phase (state <= GCSatomic),
** any partial propagation is discarded and the collector is fast-forwarded
** to the sweep phase, then the current sweep is finished before running
** one complete fresh cycle. Saves/restores the VM state around the work.
*/
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
             "bad GC state");
  /* Now perform a full GC. */
  g->gc.state = GCSpause;  /* Restart from the beginning of a cycle. */
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}
809

810
/* -- Write barriers ------------------------------------------------------ */
811

812
/* Move the GC propagation frontier forward. */
813
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
1,262✔
814
{
815
  lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
1,262✔
816
             "bad object states for forward barrier");
817
  lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
1,262✔
818
             "bad GC state");
819
  lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
1,262✔
820
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
821
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
1,262✔
822
    gc_mark(g, v);  /* Move frontier forward. */
15✔
823
  else
824
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
1,247✔
825
}
×
826

827
/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
/* Recover the enclosing GCupval's marked byte from a pointer to its tv
** field, using the offsetof() difference between the two members.
*/
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));  /* Propagation phase: mark the stored value. */
  else
    /* Otherwise recolor the upvalue itself to the current white. */
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}
838

839
/* Close upvalue. Also needs a write barrier.
** Copies the referenced stack slot into the upvalue itself, redirects the
** upvalue to that copy, and links the upvalue into the GC root list.
** Then fixes the object's GC color to preserve collector invariants.
*/
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  /* Link into the main GC object list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));  /* Black -> white needs a barrier. */
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
                 "bad GC state");
    }
  }
}
861

862
#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  int gcstate = g->gc.state;
  /* Only the mark phases need the trace to be (re-)marked. */
  if (gcstate == GCSpropagate || gcstate == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif
870

871
/* -- Allocator ----------------------------------------------------------- */
872

873
/* Call pluggable memory allocator to allocate or resize a fragment. */
874
void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
4,512,820✔
875
{
876
  global_State *g = G(L);
4,512,820✔
877
  lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
4,512,820✔
878

879
  setgcref(g->mem_L, obj2gco(L));
4,512,820✔
880
  p = g->allocf(g->allocd, p, osz, nsz);
4,512,820✔
881
  if (p == NULL && nsz > 0)
4,512,820✔
882
    lj_err_mem(L);
2✔
883
  lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
4,512,818✔
884
  lj_assertG(checkptrGC(p),
4,512,818✔
885
             "allocated memory address %p outside required range", p);
886
  g->gc.total = (g->gc.total - osz) + nsz;
4,512,818✔
887
  g->gc.allocated += nsz;
4,512,818✔
888
  g->gc.freed += osz;
4,512,818✔
889
  return p;
4,512,818✔
890
}
891

892
/* Allocate new GC object and link it to the root set. */
893
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
139,387,805✔
894
{
895
  global_State *g = G(L);
139,387,805✔
896
  GCobj *o;
139,387,805✔
897

898
  setgcref(g->mem_L, obj2gco(L));
139,387,805✔
899
  o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
139,387,805✔
900
  if (o == NULL)
139,387,805✔
901
    lj_err_mem(L);
×
902
  lj_assertG(checkptrGC(o),
139,387,805✔
903
             "allocated memory address %p outside required range", o);
904
  g->gc.total += size;
139,387,805✔
905
  g->gc.allocated += size;
139,387,805✔
906
  setgcrefr(o->gch.nextgc, g->gc.root);
139,387,805✔
907
  setgcref(g->gc.root, o);
139,387,805✔
908
  newwhite(g, o);
139,387,805✔
909
  return o;
139,387,805✔
910
}
911

912
/* Resize growable vector. */
913
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
180,166✔
914
{
915
  MSize sz = (*szp) << 1;
180,166✔
916
  if (sz < LJ_MIN_VECSZ)
180,166✔
917
    sz = LJ_MIN_VECSZ;
918
  if (sz > lim)
180,166✔
919
    sz = lim;
920
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
180,166✔
921
  *szp = sz;
180,165✔
922
  return p;
180,165✔
923
}
924

STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc