/*
** Origin note: LuaJIT commit "FFI: Turn FFI finalizer table into a proper
** GC root." (luajit-2.0.git, src/lj_gc.c, blob 9cabdef0ff71a8ea27e14db1d433a17e2da3a94a)
*/
/*
** Garbage collector.
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/
#define lj_gc_c
#define LUA_CORE

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_buf.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_func.h"
#include "lj_udata.h"
#include "lj_meta.h"
#include "lj_state.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif
#include "lj_trace.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"
/* Tuning constants for the incremental GC (cost units per step). */
#define GCSTEPSIZE	1024u
#define GCSWEEPMAX	40
#define GCSWEEPCOST	10
#define GCFINALIZECOST	100

/* Macros to set GCobj colors and flags. */
#define white2gray(x)	((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)	((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)	((u)->marked & LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
	       "TValue and GC type mismatch"); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. Strings are never gray, so just clear the whites. */
#define gc_mark_str(s)	((s)->marked &= (uint8_t)~LJ_GC_WHITES)
57 /* Mark a white GCobj. */
58 static void gc_mark(global_State *g, GCobj *o)
60 int gct = o->gch.gct;
61 lj_assertG(iswhite(o), "mark of non-white object");
62 lj_assertG(!isdead(g, o), "mark of dead object");
63 white2gray(o);
64 if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
65 GCtab *mt = tabref(gco2ud(o)->metatable);
66 gray2black(o); /* Userdata are never gray. */
67 if (mt) gc_markobj(g, mt);
68 gc_markobj(g, tabref(gco2ud(o)->env));
69 if (LJ_HASBUFFER && gco2ud(o)->udtype == UDTYPE_BUFFER) {
70 SBufExt *sbx = (SBufExt *)uddata(gco2ud(o));
71 if (sbufiscow(sbx) && gcref(sbx->cowref))
72 gc_markobj(g, gcref(sbx->cowref));
73 if (gcref(sbx->dict_str))
74 gc_markobj(g, gcref(sbx->dict_str));
75 if (gcref(sbx->dict_mt))
76 gc_markobj(g, gcref(sbx->dict_mt));
78 } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
79 GCupval *uv = gco2uv(o);
80 gc_marktv(g, uvval(uv));
81 if (uv->closed)
82 gray2black(o); /* Closed upvalues are never gray. */
83 } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
84 lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
85 gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
86 "bad GC type %d", gct);
87 setgcrefr(o->gch.gclist, g->gc.gray);
88 setgcref(g->gc.gray, o);
92 /* Mark GC roots. */
93 static void gc_mark_gcroot(global_State *g)
95 ptrdiff_t i;
96 for (i = 0; i < GCROOT_MAX; i++)
97 if (gcref(g->gcroot[i]) != NULL)
98 gc_markobj(g, gcref(g->gcroot[i]));
101 /* Start a GC cycle and mark the root set. */
102 static void gc_mark_start(global_State *g)
104 setgcrefnull(g->gc.gray);
105 setgcrefnull(g->gc.grayagain);
106 setgcrefnull(g->gc.weak);
107 gc_markobj(g, mainthread(g));
108 gc_markobj(g, tabref(mainthread(g)->env));
109 gc_marktv(g, &g->registrytv);
110 gc_mark_gcroot(g);
111 g->gc.state = GCSpropagate;
114 /* Mark open upvalues. */
115 static void gc_mark_uv(global_State *g)
117 GCupval *uv;
118 for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
119 lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
120 "broken upvalue chain");
121 if (isgray(obj2gco(uv)))
122 gc_marktv(g, uvval(uv));
126 /* Mark userdata in mmudata list. */
127 static void gc_mark_mmudata(global_State *g)
129 GCobj *root = gcref(g->gc.mmudata);
130 GCobj *u = root;
131 if (u) {
132 do {
133 u = gcnext(u);
134 makewhite(g, u); /* Could be from previous GC. */
135 gc_mark(g, u);
136 } while (u != root);
140 /* Separate userdata objects to be finalized to mmudata list. */
141 size_t lj_gc_separateudata(global_State *g, int all)
143 size_t m = 0;
144 GCRef *p = &mainthread(g)->nextgc;
145 GCobj *o;
146 while ((o = gcref(*p)) != NULL) {
147 if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
148 p = &o->gch.nextgc; /* Nothing to do. */
149 } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
150 markfinalized(o); /* Done, as there's no __gc metamethod. */
151 p = &o->gch.nextgc;
152 } else { /* Otherwise move userdata to be finalized to mmudata list. */
153 m += sizeudata(gco2ud(o));
154 markfinalized(o);
155 *p = o->gch.nextgc;
156 if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
157 GCobj *root = gcref(g->gc.mmudata);
158 setgcrefr(o->gch.nextgc, root->gch.nextgc);
159 setgcref(root->gch.nextgc, o);
160 setgcref(g->gc.mmudata, o);
161 } else { /* Create circular list. */
162 setgcref(o->gch.nextgc, o);
163 setgcref(g->gc.mmudata, o);
167 return m;
170 /* -- Propagation phase --------------------------------------------------- */
172 /* Traverse a table. */
173 static int gc_traverse_tab(global_State *g, GCtab *t)
175 int weak = 0;
176 cTValue *mode;
177 GCtab *mt = tabref(t->metatable);
178 if (mt)
179 gc_markobj(g, mt);
180 mode = lj_meta_fastg(g, mt, MM_mode);
181 if (mode && tvisstr(mode)) { /* Valid __mode field? */
182 const char *modestr = strVdata(mode);
183 int c;
184 while ((c = *modestr++)) {
185 if (c == 'k') weak |= LJ_GC_WEAKKEY;
186 else if (c == 'v') weak |= LJ_GC_WEAKVAL;
188 if (weak) { /* Weak tables are cleared in the atomic phase. */
189 #if LJ_HASFFI
190 if (gcref(g->gcroot[GCROOT_FFI_FIN]) == obj2gco(t)) {
191 weak = (int)(~0u & ~LJ_GC_WEAKVAL);
192 } else
193 #endif
195 t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
196 setgcrefr(t->gclist, g->gc.weak);
197 setgcref(g->gc.weak, obj2gco(t));
201 if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
202 return 1;
203 if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
204 MSize i, asize = t->asize;
205 for (i = 0; i < asize; i++)
206 gc_marktv(g, arrayslot(t, i));
208 if (t->hmask > 0) { /* Mark hash part. */
209 Node *node = noderef(t->node);
210 MSize i, hmask = t->hmask;
211 for (i = 0; i <= hmask; i++) {
212 Node *n = &node[i];
213 if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
214 lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
215 if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
216 if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
220 return weak;
223 /* Traverse a function. */
224 static void gc_traverse_func(global_State *g, GCfunc *fn)
226 gc_markobj(g, tabref(fn->c.env));
227 if (isluafunc(fn)) {
228 uint32_t i;
229 lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
230 "function upvalues out of range");
231 gc_markobj(g, funcproto(fn));
232 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
233 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
234 } else {
235 uint32_t i;
236 for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
237 gc_marktv(g, &fn->c.upvalue[i]);
241 #if LJ_HASJIT
242 /* Mark a trace. */
243 static void gc_marktrace(global_State *g, TraceNo traceno)
245 GCobj *o = obj2gco(traceref(G2J(g), traceno));
246 lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
247 if (iswhite(o)) {
248 white2gray(o);
249 setgcrefr(o->gch.gclist, g->gc.gray);
250 setgcref(g->gc.gray, o);
254 /* Traverse a trace. */
255 static void gc_traverse_trace(global_State *g, GCtrace *T)
257 IRRef ref;
258 if (T->traceno == 0) return;
259 for (ref = T->nk; ref < REF_TRUE; ref++) {
260 IRIns *ir = &T->ir[ref];
261 if (ir->o == IR_KGC)
262 gc_markobj(g, ir_kgc(ir));
263 if (irt_is64(ir->t) && ir->o != IR_KNULL)
264 ref++;
266 if (T->link) gc_marktrace(g, T->link);
267 if (T->nextroot) gc_marktrace(g, T->nextroot);
268 if (T->nextside) gc_marktrace(g, T->nextside);
269 gc_markobj(g, gcref(T->startpt));
272 /* The current trace is a GC root while not anchored in the prototype (yet). */
273 #define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
274 #else
275 #define gc_traverse_curtrace(g) UNUSED(g)
276 #endif
278 /* Traverse a prototype. */
279 static void gc_traverse_proto(global_State *g, GCproto *pt)
281 ptrdiff_t i;
282 gc_mark_str(proto_chunkname(pt));
283 for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
284 gc_markobj(g, proto_kgc(pt, i));
285 #if LJ_HASJIT
286 if (pt->trace) gc_marktrace(g, pt->trace);
287 #endif
290 /* Traverse the frame structure of a stack. */
291 static MSize gc_traverse_frames(global_State *g, lua_State *th)
293 TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
294 /* Note: extra vararg frame not skipped, marks function twice (harmless). */
295 for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
296 GCfunc *fn = frame_func(frame);
297 TValue *ftop = frame;
298 if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
299 if (ftop > top) top = ftop;
300 if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */
302 top++; /* Correct bias of -1 (frame == base-1). */
303 if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
304 return (MSize)(top - bot); /* Return minimum needed stack size. */
307 /* Traverse a thread object. */
308 static void gc_traverse_thread(global_State *g, lua_State *th)
310 TValue *o, *top = th->top;
311 for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
312 gc_marktv(g, o);
313 if (g->gc.state == GCSatomic) {
314 top = tvref(th->stack) + th->stacksize;
315 for (; o < top; o++) /* Clear unmarked slots. */
316 setnilV(o);
318 gc_markobj(g, tabref(th->env));
319 lj_state_shrinkstack(th, gc_traverse_frames(g, th));
322 /* Propagate one gray object. Traverse it and turn it black. */
323 static size_t propagatemark(global_State *g)
325 GCobj *o = gcref(g->gc.gray);
326 int gct = o->gch.gct;
327 lj_assertG(isgray(o), "propagation of non-gray object");
328 gray2black(o);
329 setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
330 if (LJ_LIKELY(gct == ~LJ_TTAB)) {
331 GCtab *t = gco2tab(o);
332 if (gc_traverse_tab(g, t) > 0)
333 black2gray(o); /* Keep weak tables gray. */
334 return sizeof(GCtab) + sizeof(TValue) * t->asize +
335 (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
336 } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
337 GCfunc *fn = gco2func(o);
338 gc_traverse_func(g, fn);
339 return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
340 sizeCfunc((MSize)fn->c.nupvalues);
341 } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
342 GCproto *pt = gco2pt(o);
343 gc_traverse_proto(g, pt);
344 return pt->sizept;
345 } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
346 lua_State *th = gco2th(o);
347 setgcrefr(th->gclist, g->gc.grayagain);
348 setgcref(g->gc.grayagain, o);
349 black2gray(o); /* Threads are never black. */
350 gc_traverse_thread(g, th);
351 return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
352 } else {
353 #if LJ_HASJIT
354 GCtrace *T = gco2trace(o);
355 gc_traverse_trace(g, T);
356 return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
357 T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
358 #else
359 lj_assertG(0, "bad GC type %d", gct);
360 return 0;
361 #endif
365 /* Propagate all gray objects. */
366 static size_t gc_propagate_gray(global_State *g)
368 size_t m = 0;
369 while (gcref(g->gc.gray) != NULL)
370 m += propagatemark(g);
371 return m;
374 /* -- Sweep phase --------------------------------------------------------- */
376 /* Type of GC free functions. */
377 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
379 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
380 static const GCFreeFunc gc_freefunc[] = {
381 (GCFreeFunc)lj_str_free,
382 (GCFreeFunc)lj_func_freeuv,
383 (GCFreeFunc)lj_state_free,
384 (GCFreeFunc)lj_func_freeproto,
385 (GCFreeFunc)lj_func_free,
386 #if LJ_HASJIT
387 (GCFreeFunc)lj_trace_free,
388 #else
389 (GCFreeFunc)0,
390 #endif
391 #if LJ_HASFFI
392 (GCFreeFunc)lj_cdata_free,
393 #else
394 (GCFreeFunc)0,
395 #endif
396 (GCFreeFunc)lj_tab_free,
397 (GCFreeFunc)lj_udata_free
400 /* Full sweep of a GC list. */
401 #define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0)
403 /* Partial sweep of a GC list. */
404 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
406 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
407 int ow = otherwhite(g);
408 GCobj *o;
409 while ((o = gcref(*p)) != NULL && lim-- > 0) {
410 if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
411 gc_fullsweep(g, &gco2th(o)->openupval);
412 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
413 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
414 "sweep of undead object");
415 makewhite(g, o); /* Value is alive, change to the current white. */
416 p = &o->gch.nextgc;
417 } else { /* Otherwise value is dead, free it. */
418 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
419 "sweep of unlive object");
420 setgcrefr(*p, o->gch.nextgc);
421 if (o == gcref(g->gc.root))
422 setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
423 gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
426 return p;
429 /* Sweep one string interning table chain. Preserves hashalg bit. */
430 static void gc_sweepstr(global_State *g, GCRef *chain)
432 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
433 int ow = otherwhite(g);
434 uintptr_t u = gcrefu(*chain);
435 GCRef q;
436 GCRef *p = &q;
437 GCobj *o;
438 setgcrefp(q, (u & ~(uintptr_t)1));
439 while ((o = gcref(*p)) != NULL) {
440 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
441 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
442 "sweep of undead string");
443 makewhite(g, o); /* String is alive, change to the current white. */
444 p = &o->gch.nextgc;
445 } else { /* Otherwise string is dead, free it. */
446 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
447 "sweep of unlive string");
448 setgcrefr(*p, o->gch.nextgc);
449 lj_str_free(g, gco2str(o));
452 setgcrefp(*chain, (gcrefu(q) | (u & 1)));
455 /* Check whether we can clear a key or a value slot from a table. */
456 static int gc_mayclear(cTValue *o, int val)
458 if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
459 if (tvisstr(o)) { /* But strings cannot be used as weak references. */
460 gc_mark_str(strV(o)); /* And need to be marked. */
461 return 0;
463 if (iswhite(gcV(o)))
464 return 1; /* Object is about to be collected. */
465 if (tvisudata(o) && val && isfinalized(udataV(o)))
466 return 1; /* Finalized userdata is dropped only from values. */
468 return 0; /* Cannot clear. */
471 /* Clear collected entries from weak tables. */
472 static void gc_clearweak(global_State *g, GCobj *o)
474 UNUSED(g);
475 while (o) {
476 GCtab *t = gco2tab(o);
477 lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
478 if ((t->marked & LJ_GC_WEAKVAL)) {
479 MSize i, asize = t->asize;
480 for (i = 0; i < asize; i++) {
481 /* Clear array slot when value is about to be collected. */
482 TValue *tv = arrayslot(t, i);
483 if (gc_mayclear(tv, 1))
484 setnilV(tv);
487 if (t->hmask > 0) {
488 Node *node = noderef(t->node);
489 MSize i, hmask = t->hmask;
490 for (i = 0; i <= hmask; i++) {
491 Node *n = &node[i];
492 /* Clear hash slot when key or value is about to be collected. */
493 if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
494 gc_mayclear(&n->val, 1)))
495 setnilV(&n->val);
498 o = gcref(t->gclist);
502 /* Call a userdata or cdata finalizer. */
503 static void gc_call_finalizer(global_State *g, lua_State *L,
504 cTValue *mo, GCobj *o)
506 /* Save and restore lots of state around the __gc callback. */
507 uint8_t oldh = hook_save(g);
508 GCSize oldt = g->gc.threshold;
509 int errcode;
510 TValue *top;
511 lj_trace_abort(g);
512 hook_entergc(g); /* Disable hooks and new traces during __gc. */
513 if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
514 g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
515 top = L->top;
516 copyTV(L, top++, mo);
517 if (LJ_FR2) setnilV(top++);
518 setgcV(L, top, o, ~o->gch.gct);
519 L->top = top+1;
520 errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */
521 hook_restore(g, oldh);
522 if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
523 g->gc.threshold = oldt; /* Restore GC threshold. */
524 if (errcode) {
525 ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */
526 lj_vmevent_send(L, ERRFIN,
527 copyTV(L, L->top++, restorestack(L, errobj));
529 L->top--;
533 /* Finalize one userdata or cdata object from the mmudata list. */
534 static void gc_finalize(lua_State *L)
536 global_State *g = G(L);
537 GCobj *o = gcnext(gcref(g->gc.mmudata));
538 cTValue *mo;
539 lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
540 /* Unchain from list of userdata to be finalized. */
541 if (o == gcref(g->gc.mmudata))
542 setgcrefnull(g->gc.mmudata);
543 else
544 setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
545 #if LJ_HASFFI
546 if (o->gch.gct == ~LJ_TCDATA) {
547 TValue tmp, *tv;
548 /* Add cdata back to the GC list and make it white. */
549 setgcrefr(o->gch.nextgc, g->gc.root);
550 setgcref(g->gc.root, o);
551 makewhite(g, o);
552 o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
553 /* Resolve finalizer. */
554 setcdataV(L, &tmp, gco2cd(o));
555 tv = lj_tab_set(L, tabref(g->gcroot[GCROOT_FFI_FIN]), &tmp);
556 if (!tvisnil(tv)) {
557 g->gc.nocdatafin = 0;
558 copyTV(L, &tmp, tv);
559 setnilV(tv); /* Clear entry in finalizer table. */
560 gc_call_finalizer(g, L, &tmp, o);
562 return;
564 #endif
565 /* Add userdata back to the main userdata list and make it white. */
566 setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
567 setgcref(mainthread(g)->nextgc, o);
568 makewhite(g, o);
569 /* Resolve the __gc metamethod. */
570 mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
571 if (mo)
572 gc_call_finalizer(g, L, mo, o);
575 /* Finalize all userdata objects from mmudata list. */
576 void lj_gc_finalize_udata(lua_State *L)
578 while (gcref(G(L)->gc.mmudata) != NULL)
579 gc_finalize(L);
#if LJ_HASFFI
/* Finalize all cdata objects from the finalizer table (at shutdown). */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  GCtab *t = tabref(g->gcroot[GCROOT_FFI_FIN]);
  Node *node = noderef(t->node);
  ptrdiff_t i;
  setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
  for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
    if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
      GCobj *o = gcV(&node[i].key);
      TValue tmp;
      makewhite(g, o);
      o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
      copyTV(L, &tmp, &node[i].val);
      setnilV(&node[i].val);
      gc_call_finalizer(g, L, &tmp, o);
    }
}
#endif
604 /* Free all remaining GC objects. */
605 void lj_gc_freeall(global_State *g)
607 MSize i, strmask;
608 /* Free everything, except super-fixed objects (the main thread). */
609 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
610 gc_fullsweep(g, &g->gc.root);
611 strmask = g->str.mask;
612 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
613 gc_sweepstr(g, &g->str.tab[i]);
616 /* -- Collector ----------------------------------------------------------- */
618 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
619 static void atomic(global_State *g, lua_State *L)
621 size_t udsize;
623 gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
624 gc_propagate_gray(g); /* Propagate any left-overs. */
626 setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
627 setgcrefnull(g->gc.weak);
628 lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
629 gc_markobj(g, L); /* Mark running thread. */
630 gc_traverse_curtrace(g); /* Traverse current trace. */
631 gc_mark_gcroot(g); /* Mark GC roots (again). */
632 gc_propagate_gray(g); /* Propagate all of the above. */
634 setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
635 setgcrefnull(g->gc.grayagain);
636 gc_propagate_gray(g); /* Propagate it. */
638 udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
639 gc_mark_mmudata(g); /* Mark them. */
640 udsize += gc_propagate_gray(g); /* And propagate the marks. */
642 /* All marking done, clear weak tables. */
643 gc_clearweak(g, gcref(g->gc.weak));
645 lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */
647 /* Prepare for sweep phase. */
648 g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
649 g->strempty.marked = g->gc.currentwhite;
650 setmref(g->gc.sweep, &g->gc.root);
651 g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */
654 /* GC state machine. Returns a cost estimate for each step performed. */
655 static size_t gc_onestep(lua_State *L)
657 global_State *g = G(L);
658 switch (g->gc.state) {
659 case GCSpause:
660 gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
661 return 0;
662 case GCSpropagate:
663 if (gcref(g->gc.gray) != NULL)
664 return propagatemark(g); /* Propagate one gray object. */
665 g->gc.state = GCSatomic; /* End of mark phase. */
666 return 0;
667 case GCSatomic:
668 if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */
669 return LJ_MAX_MEM;
670 atomic(g, L);
671 g->gc.state = GCSsweepstring; /* Start of sweep phase. */
672 g->gc.sweepstr = 0;
673 return 0;
674 case GCSsweepstring: {
675 GCSize old = g->gc.total;
676 gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]); /* Sweep one chain. */
677 if (g->gc.sweepstr > g->str.mask)
678 g->gc.state = GCSsweep; /* All string hash chains sweeped. */
679 lj_assertG(old >= g->gc.total, "sweep increased memory");
680 g->gc.estimate -= old - g->gc.total;
681 return GCSWEEPCOST;
683 case GCSsweep: {
684 GCSize old = g->gc.total;
685 setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
686 lj_assertG(old >= g->gc.total, "sweep increased memory");
687 g->gc.estimate -= old - g->gc.total;
688 if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
689 if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1)
690 lj_str_resize(L, g->str.mask >> 1); /* Shrink string table. */
691 if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
692 g->gc.state = GCSfinalize;
693 #if LJ_HASFFI
694 g->gc.nocdatafin = 1;
695 #endif
696 } else { /* Otherwise skip this phase to help the JIT. */
697 g->gc.state = GCSpause; /* End of GC cycle. */
698 g->gc.debt = 0;
701 return GCSWEEPMAX*GCSWEEPCOST;
703 case GCSfinalize:
704 if (gcref(g->gc.mmudata) != NULL) {
705 GCSize old = g->gc.total;
706 if (tvref(g->jit_base)) /* Don't call finalizers on trace. */
707 return LJ_MAX_MEM;
708 gc_finalize(L); /* Finalize one userdata object. */
709 if (old >= g->gc.total && g->gc.estimate > old - g->gc.total)
710 g->gc.estimate -= old - g->gc.total;
711 if (g->gc.estimate > GCFINALIZECOST)
712 g->gc.estimate -= GCFINALIZECOST;
713 return GCFINALIZECOST;
715 #if LJ_HASFFI
716 if (!g->gc.nocdatafin) lj_tab_rehash(L, tabref(g->gcroot[GCROOT_FFI_FIN]));
717 #endif
718 g->gc.state = GCSpause; /* End of GC cycle. */
719 g->gc.debt = 0;
720 return 0;
721 default:
722 lj_assertG(0, "bad GC state");
723 return 0;
727 /* Perform a limited amount of incremental GC steps. */
728 int LJ_FASTCALL lj_gc_step(lua_State *L)
730 global_State *g = G(L);
731 GCSize lim;
732 int32_t ostate = g->vmstate;
733 setvmstate(g, GC);
734 lim = (GCSTEPSIZE/100) * g->gc.stepmul;
735 if (lim == 0)
736 lim = LJ_MAX_MEM;
737 if (g->gc.total > g->gc.threshold)
738 g->gc.debt += g->gc.total - g->gc.threshold;
739 do {
740 lim -= (GCSize)gc_onestep(L);
741 if (g->gc.state == GCSpause) {
742 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
743 g->vmstate = ostate;
744 return 1; /* Finished a GC cycle. */
746 } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
747 if (g->gc.debt < GCSTEPSIZE) {
748 g->gc.threshold = g->gc.total + GCSTEPSIZE;
749 g->vmstate = ostate;
750 return -1;
751 } else {
752 g->gc.debt -= GCSTEPSIZE;
753 g->gc.threshold = g->gc.total;
754 g->vmstate = ostate;
755 return 0;
759 /* Ditto, but fix the stack top first. */
760 void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
762 if (curr_funcisL(L)) L->top = curr_topL(L);
763 lj_gc_step(L);
#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif
780 /* Perform a full GC cycle. */
781 void lj_gc_fullgc(lua_State *L)
783 global_State *g = G(L);
784 int32_t ostate = g->vmstate;
785 setvmstate(g, GC);
786 if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
787 setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */
788 setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
789 setgcrefnull(g->gc.grayagain);
790 setgcrefnull(g->gc.weak);
791 g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
792 g->gc.sweepstr = 0;
794 while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
795 gc_onestep(L); /* Finish sweep. */
796 lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
797 "bad GC state");
798 /* Now perform a full GC. */
799 g->gc.state = GCSpause;
800 do { gc_onestep(L); } while (g->gc.state != GCSpause);
801 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
802 g->vmstate = ostate;
805 /* -- Write barriers ------------------------------------------------------ */
807 /* Move the GC propagation frontier forward. */
808 void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
810 lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
811 "bad object states for forward barrier");
812 lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
813 "bad GC state");
814 lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
815 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
816 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
817 gc_mark(g, v); /* Move frontier forward. */
818 else
819 makewhite(g, o); /* Make it white to avoid the following barrier. */
822 /* Specialized barrier for closed upvalue. Pass &uv->tv. */
823 void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
825 #define TV2MARKED(x) \
826 (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
827 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
828 gc_mark(g, gcV(tv));
829 else
830 TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
831 #undef TV2MARKED
834 /* Close upvalue. Also needs a write barrier. */
835 void lj_gc_closeuv(global_State *g, GCupval *uv)
837 GCobj *o = obj2gco(uv);
838 /* Copy stack slot to upvalue itself and point to the copy. */
839 copyTV(mainthread(g), &uv->tv, uvval(uv));
840 setmref(uv->v, &uv->tv);
841 uv->closed = 1;
842 setgcrefr(o->gch.nextgc, g->gc.root);
843 setgcref(g->gc.root, o);
844 if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
845 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
846 gray2black(o); /* Make it black and preserve invariant. */
847 if (tviswhite(&uv->tv))
848 lj_gc_barrierf(g, o, gcV(&uv->tv));
849 } else {
850 makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
851 lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
852 "bad GC state");
#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif
866 /* -- Allocator ----------------------------------------------------------- */
868 /* Call pluggable memory allocator to allocate or resize a fragment. */
869 void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
871 global_State *g = G(L);
872 lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
873 p = g->allocf(g->allocd, p, osz, nsz);
874 if (p == NULL && nsz > 0)
875 lj_err_mem(L);
876 lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
877 lj_assertG(checkptrGC(p),
878 "allocated memory address %p outside required range", p);
879 g->gc.total = (g->gc.total - osz) + nsz;
880 return p;
883 /* Allocate new GC object and link it to the root set. */
884 void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
886 global_State *g = G(L);
887 GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
888 if (o == NULL)
889 lj_err_mem(L);
890 lj_assertG(checkptrGC(o),
891 "allocated memory address %p outside required range", o);
892 g->gc.total += size;
893 setgcrefr(o->gch.nextgc, g->gc.root);
894 setgcref(g->gc.root, o);
895 newwhite(g, o);
896 return o;
899 /* Resize growable vector. */
900 void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
902 MSize sz = (*szp) << 1;
903 if (sz < LJ_MIN_VECSZ)
904 sz = LJ_MIN_VECSZ;
905 if (sz > lim)
906 sz = lim;
907 p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
908 *szp = sz;
909 return p;