lj_gc.c

/*
** Garbage collector.
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/

#define lj_gc_c
#define LUA_CORE

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_buf.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_func.h"
#include "lj_udata.h"
#include "lj_meta.h"
#include "lj_state.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#include "lj_cdata.h"
#endif
#include "lj_trace.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"

#define GCSTEPSIZE	1024u
#define GCSWEEPMAX	40
#define GCSWEEPCOST	10
#define GCFINALIZECOST	100
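
/* Note: GCSTEPSIZE is the nominal amount of work (in bytes) for one GC
** step, GCSWEEPMAX bounds the objects swept per sweep step, and the *COST
** values are the abstract work units charged per swept or finalized
** object (see gc_onestep below for how these feed the step budget).
*/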

/* Macros to set GCobj colors and flags. */
#define white2gray(x)		((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)		((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)		((u)->marked & LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
  { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
	       "TValue and GC type mismatch"); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. */
#define gc_mark_str(s)		((s)->marked &= (uint8_t)~LJ_GC_WHITES)

/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;
  lj_assertG(iswhite(o), "mark of non-white object");
  lj_assertG(!isdead(g, o), "mark of dead object");
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
    if (LJ_HASBUFFER && gco2ud(o)->udtype == UDTYPE_BUFFER) {
      SBufExt *sbx = (SBufExt *)uddata(gco2ud(o));
      if (sbufiscow(sbx) && gcref(sbx->cowref))
	gc_markobj(g, gcref(sbx->cowref));
      if (gcref(sbx->dict_str))
	gc_markobj(g, gcref(sbx->dict_str));
      if (gcref(sbx->dict_mt))
	gc_markobj(g, gcref(sbx->dict_mt));
    }
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
	       gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
	       "bad GC type %d", gct);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}
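
/* Note: the collector is tri-color incremental. White objects are
** unvisited, gray objects are marked but may have unmarked children
** (queued on g->gc.gray), and black objects are fully traversed. Strings
** and cdata carry no references to traverse, so clearing their white bits
** above is all the marking they need.
*/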

/* Mark GC roots. */
static void gc_mark_gcroot(global_State *g)
{
  ptrdiff_t i;
  for (i = 0; i < GCROOT_MAX; i++)
    if (gcref(g->gcroot[i]) != NULL)
      gc_markobj(g, gcref(g->gcroot[i]));
}

/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);
  g->gc.state = GCSpropagate;
}

/* Mark open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
	       "broken upvalue chain");
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));
  }
}

/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}

/* Separate userdata objects to be finalized to mmudata list. */
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  GCRef *p = &mainthread(g)->nextgc;
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(o);
      *p = o->gch.nextgc;
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
	GCobj *root = gcref(g->gc.mmudata);
	setgcrefr(o->gch.nextgc, root->gch.nextgc);
	setgcref(root->gch.nextgc, o);
	setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
	setgcref(o->gch.nextgc, o);
	setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}
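
/* Note: g->gc.mmudata is a circular singly-linked list anchored at its
** tail, so gcnext(tail) is the head. New entries are appended by linking
** them behind the tail and advancing the anchor; gc_finalize below always
** unchains the head.
*/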

/* -- Propagation phase --------------------------------------------------- */

/* Traverse a table. */
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
    }
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
#if LJ_HASFFI
      CTState *cts = ctype_ctsG(g);
      if (cts && cts->finalizer == t) {
	weak = (int)(~0u & ~LJ_GC_WEAKVAL);
      } else
#endif
      {
	t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
	setgcrefr(t->gclist, g->gc.weak);
	setgcref(g->gc.weak, obj2gco(t));
      }
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
	lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
	if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
	if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}
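
/* Note: e.g. a table with __mode = "v" gets LJ_GC_WEAKVAL set here: its
** keys are still marked above, the table is chained onto g->gc.weak, and
** entries with dead values are nil'ed out by gc_clearweak during the
** atomic phase.
*/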

/* Traverse a function. */
static void gc_traverse_func(global_State *g, GCfunc *fn)
{
  gc_markobj(g, tabref(fn->c.env));
  if (isluafunc(fn)) {
    uint32_t i;
    lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
	       "function upvalues out of range");
    gc_markobj(g, funcproto(fn));
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
  } else {
    uint32_t i;
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
      gc_marktv(g, &fn->c.upvalue[i]);
  }
}

#if LJ_HASJIT
/* Mark a trace. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
    if (irt_is64(ir->t) && ir->o != IR_KNULL)
      ref++;
  }
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)	UNUSED(g)
#endif

/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);
#endif
}

/* Traverse the frame structure of a stack. */
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}

/* Traverse a thread object. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}

/* Propagate one gray object. Traverse it and turn it black. */
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lj_assertG(isgray(o), "propagation of non-gray object");
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
	   (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
			   sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
	   T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lj_assertG(0, "bad GC type %d", gct);
    return 0;
#endif
  }
}
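
/* Note: the size returned above approximates the memory footprint of the
** traversed object. It is charged against the work budget in lj_gc_step,
** so large objects consume proportionally more of an incremental step.
*/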

/* Propagate all gray objects. */
static size_t gc_propagate_gray(global_State *g)
{
  size_t m = 0;
  while (gcref(g->gc.gray) != NULL)
    m += propagatemark(g);
  return m;
}

/* -- Sweep phase --------------------------------------------------------- */

/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};
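
/* Note: gc_sweep below indexes this table with o->gch.gct - ~LJ_TSTR.
** The gct field stores the complement of the type tag, so the subtraction
** yields a zero-based index in tag order (hence the ORDER LJ_T comment).
*/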

/* Full sweep of a GC list. */
#define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)
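
/* Note: the collector alternates between two whites. Objects allocated
** during a cycle get the current white and are not collected by that
** cycle's sweep; only objects still carrying the other (previous) white
** are dead. The test below, (marked ^ LJ_GC_WHITES) & ow, is nonzero for
** black or current-white objects, i.e. everything that must survive.
*/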

/* Partial sweep of a GC list. */
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
		 "sweep of undead object");
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
		 "sweep of unlive object");
      setgcrefr(*p, o->gch.nextgc);
      if (o == gcref(g->gc.root))
	setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    }
  }
  return p;
}

/* Sweep one string interning table chain. Preserves hashalg bit. */
static void gc_sweepstr(global_State *g, GCRef *chain)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  uintptr_t u = gcrefu(*chain);
  GCRef q;
  GCRef *p = &q;
  GCobj *o;
  setgcrefp(q, (u & ~(uintptr_t)1));
  while ((o = gcref(*p)) != NULL) {
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
		 "sweep of undead string");
      makewhite(g, o);  /* String is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise string is dead, free it. */
      lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
		 "sweep of unlive string");
      setgcrefr(*p, o->gch.nextgc);
      lj_str_free(g, gco2str(o));
    }
  }
  setgcrefp(*chain, (gcrefu(q) | (u & 1)));
}

/* Check whether we can clear a key or a value slot from a table. */
static int gc_mayclear(cTValue *o, int val)
{
  if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
    if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
      gc_mark_str(strV(o));  /* And need to be marked. */
      return 0;
    }
    if (iswhite(gcV(o)))
      return 1;  /* Object is about to be collected. */
    if (tvisudata(o) && val && isfinalized(udataV(o)))
      return 1;  /* Finalized userdata is dropped only from values. */
  }
  return 0;  /* Cannot clear. */
}

/* Clear collected entries from weak tables. */
static void gc_clearweak(global_State *g, GCobj *o)
{
  UNUSED(g);
  while (o) {
    GCtab *t = gco2tab(o);
    lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
	/* Clear array slot when value is about to be collected. */
	TValue *tv = arrayslot(t, i);
	if (gc_mayclear(tv, 1))
	  setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
	Node *n = &node[i];
	/* Clear hash slot when key or value is about to be collected. */
	if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
				  gc_mayclear(&n->val, 1)))
	  setnilV(&n->val);
      }
    }
    o = gcref(t->gclist);
  }
}

/* Call a userdata or cdata finalizer. */
static void gc_call_finalizer(global_State *g, lua_State *L,
			      cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  GCSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  top = L->top;
  copyTV(L, top++, mo);
  if (LJ_FR2) setnilV(top++);
  setgcV(L, top, o, ~o->gch.gct);
  L->top = top+1;
  errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    lj_vmevent_send(L, ERRFIN,
      copyTV(L, L->top++, restorestack(L, errobj));
    );
    L->top--;
  }
}
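
/* Note: errors inside a __gc handler are not propagated. The protected
** call above catches them and reports the error object via the ERRFIN
** VM event instead, so finalization of the remaining objects continues.
*/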

/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));
  cTValue *mo;
  lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}

/* Finalize all userdata objects from mmudata list. */
void lj_gc_finalize_udata(lua_State *L)
{
  while (gcref(G(L)->gc.mmudata) != NULL)
    gc_finalize(L);
}

#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table. */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
	GCobj *o = gcV(&node[i].key);
	TValue tmp;
	makewhite(g, o);
	o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
	copyTV(L, &tmp, &node[i].val);
	setnilV(&node[i].val);
	gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
#endif

/* Free all remaining GC objects. */
void lj_gc_freeall(global_State *g)
{
  MSize i, strmask;
  /* Free everything, except super-fixed objects (the main thread). */
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
  gc_fullsweep(g, &g->gc.root);
  strmask = g->str.mask;
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    gc_sweepstr(g, &g->str.tab[i]);
}

/* -- Collector ----------------------------------------------------------- */

/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;
  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */
  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */
  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */
  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */
  /* All marking done, clear weak tables. */
  gc_clearweak(g, gcref(g->gc.weak));
  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
}
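
/* Note: this phase runs to completion within a single GC step, with no
** mutator interleaving (gc_onestep refuses to enter it from JIT-compiled
** code). That makes the final remark, the weak-table clearing and the
** white flip mutually consistent.
*/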

/* GC state machine. Returns a cost estimate for each step performed. */
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    GCSize old = g->gc.total;
    gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->str.mask)
      g->gc.state = GCSsweep;  /* All string hash chains swept. */
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    return GCSWEEPCOST;
  }
  case GCSsweep: {
    GCSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    lj_assertG(old >= g->gc.total, "sweep increased memory");
    g->gc.estimate -= old - g->gc.total;
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
      if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1)
	lj_str_resize(L, g->str.mask >> 1);  /* Shrink string table. */
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
	g->gc.state = GCSfinalize;
#if LJ_HASFFI
	g->gc.nocdatafin = 1;
#endif
      } else {  /* Otherwise skip this phase to help the JIT. */
	g->gc.state = GCSpause;  /* End of GC cycle. */
	g->gc.debt = 0;
      }
    }
    return GCSWEEPMAX*GCSWEEPCOST;
  }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      GCSize old = g->gc.total;
      if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
	return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (old >= g->gc.total && g->gc.estimate > old - g->gc.total)
	g->gc.estimate -= old - g->gc.total;
      if (g->gc.estimate > GCFINALIZECOST)
	g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
#endif
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lj_assertG(0, "bad GC state");
    return 0;
  }
}
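
/* Note: the state order per cycle is pause -> propagate -> atomic ->
** sweepstring -> sweep -> finalize -> pause, with finalize skipped when
** no separated userdata is pending.
*/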

/* Perform a limited amount of incremental GC steps. */
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  GCSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;
  if (g->gc.total > g->gc.threshold)
    g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (GCSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;
    g->vmstate = ostate;
    return 0;
  }
}
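
/* Note: the return value distinguishes three cases: 1 = a full GC cycle
** was completed, -1 = the accumulated debt dropped below GCSTEPSIZE and
** the threshold was pushed ahead, 0 = more step work remains. The work
** budget lim scales with stepmul: (GCSTEPSIZE/100) * stepmul cost units.
*/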

/* Ditto, but fix the stack top first. */
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
{
  if (curr_funcisL(L)) L->top = curr_topL(L);
  lj_gc_step(L);
}

#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));
  L->base = tvref(G(L)->jit_base);
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif

/* Perform a full GC cycle. */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
	     "bad GC state");
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}

/* -- Write barriers ------------------------------------------------------ */

/* Move the GC propagation frontier forward. */
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
	     "bad object states for forward barrier");
  lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
	     "bad GC state");
  lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}
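
/* Note: this forward barrier is only used for non-table objects. Tables
** use the backward barrier instead (lj_gc_barrierback in lj_gc.h), which
** re-grays the table onto the grayagain list; hence the assertion above
** that the barriered object is not a table.
*/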

/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}

/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
	lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
		 "bad GC state");
    }
  }
}

#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif

/* -- Allocator ----------------------------------------------------------- */

/* Call pluggable memory allocator to allocate or resize a fragment. */
void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
{
  global_State *g = G(L);
  lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
  lj_assertG(checkptrGC(p),
	     "allocated memory address %p outside required range", p);
  g->gc.total = (g->gc.total - osz) + nsz;
  return p;
}

/* Allocate new GC object and link it to the root set. */
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
{
  global_State *g = G(L);
  GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lj_assertG(checkptrGC(o),
	     "allocated memory address %p outside required range", o);
  g->gc.total += size;
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);
  return o;
}
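
/* Note: new objects are born with the current white (newwhite), so a
** sweep within the same cycle treats them as live; only the next cycle
** can collect them.
*/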

/* Resize growable vector. */
void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
{
  MSize sz = (*szp) << 1;
  if (sz < LJ_MIN_VECSZ)
    sz = LJ_MIN_VECSZ;
  if (sz > lim)
    sz = lim;
  p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
  *szp = sz;
  return p;
}
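
/* Note: doubling with a LJ_MIN_VECSZ floor gives amortized O(1) growth;
** sizes are clamped at the caller-supplied hard limit lim.
*/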