/* lj_tab.c -- LuaJIT table handling (array + hash hybrid). */
  1. /*
  2. ** Table handling.
  3. ** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
  4. **
  5. ** Major portions taken verbatim or adapted from the Lua interpreter.
  6. ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
  7. */
  8. #define lj_tab_c
  9. #define LUA_CORE
  10. #include "lj_obj.h"
  11. #include "lj_gc.h"
  12. #include "lj_err.h"
  13. #include "lj_tab.h"
/* -- Object hashing ------------------------------------------------------ */

/* Hash an arbitrary key and return its anchor position in the hash table.
** Integer keys are not allowed here: callers must have converted them to
** numbers first (see the assertion below). Strings, numbers and booleans
** use type-specific hashes; everything else hashes its GC reference.
*/
static Node *hashkey(const GCtab *t, cTValue *key)
{
  lj_assertX(!tvisint(key), "attempt to hash integer");
  if (tvisstr(key))
    return hashstr(t, strV(key));
  else if (tvisnum(key))
    return hashnum(t, key);
  else if (tvisbool(key))
    return hashmask(t, boolV(key));
  else
    return hashgcref(t, key->gcr);
  /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */
}
/* -- Table creation and destruction -------------------------------------- */

/* Create new hash part for table: allocates 2^hbits nodes.
** The free-top pointer starts one past the end of the node array and
** moves downward as free nodes are consumed (see lj_tab_newkey).
** Throws LJ_ERR_TABOV if hbits exceeds LJ_MAX_HBITS.
*/
static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
{
  uint32_t hsize;
  Node *node;
  lj_assertL(hbits != 0, "zero hash size");
  if (hbits > LJ_MAX_HBITS)
    lj_err_msg(L, LJ_ERR_TABOV);
  hsize = 1u << hbits;
  node = lj_mem_newvec(L, hsize, Node);
  setmref(t->node, node);
  setfreetop(t, node, &node[hsize]);
  t->hmask = hsize-1;  /* Power-of-two size, so mask = size-1. */
}
  44. /*
  45. ** Q: Why all of these copies of t->hmask, t->node etc. to local variables?
  46. ** A: Because alias analysis for C is _really_ tough.
  47. ** Even state-of-the-art C compilers won't produce good code without this.
  48. */
  49. /* Clear hash part of table. */
  50. static LJ_AINLINE void clearhpart(GCtab *t)
  51. {
  52. uint32_t i, hmask = t->hmask;
  53. Node *node = noderef(t->node);
  54. lj_assertX(t->hmask != 0, "empty hash part");
  55. for (i = 0; i <= hmask; i++) {
  56. Node *n = &node[i];
  57. setmref(n->next, NULL);
  58. setnilV(&n->key);
  59. setnilV(&n->val);
  60. }
  61. }
  62. /* Clear array part of table. */
  63. static LJ_AINLINE void clearapart(GCtab *t)
  64. {
  65. uint32_t i, asize = t->asize;
  66. TValue *array = tvref(t->array);
  67. for (i = 0; i < asize; i++)
  68. setnilV(&array[i]);
  69. }
/* Create a new table. Note: the slots are not initialized (yet).
** Small arrays (1..LJ_MAX_COLOSIZE) are colocated directly after the GCtab
** to save an allocation; larger arrays get a separate vector. Tables with
** no hash part point t->node at the shared global nilnode sentinel.
*/
static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
{
  GCtab *t;
  /* First try to colocate the array part. */
  if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
    Node *nilnode;
    lj_assertL((sizeof(GCtab) & 7) == 0, "bad GCtab size");
    t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
    t->gct = ~LJ_TTAB;
    t->nomm = (uint8_t)~0;  /* All bits set: negative metamethod cache full. */
    t->colo = (int8_t)asize;  /* colo > 0 marks a colocated array. */
    setmref(t->array, (TValue *)((char *)t + sizeof(GCtab)));
    setgcrefnull(t->metatable);
    t->asize = asize;
    t->hmask = 0;
    nilnode = &G(L)->nilnode;
    setmref(t->node, nilnode);
#if LJ_GC64
    setmref(t->freetop, nilnode);
#endif
  } else {  /* Otherwise separately allocate the array part. */
    Node *nilnode;
    t = lj_mem_newobj(L, GCtab);
    t->gct = ~LJ_TTAB;
    t->nomm = (uint8_t)~0;
    t->colo = 0;
    setmref(t->array, NULL);
    setgcrefnull(t->metatable);
    t->asize = 0;  /* In case the array allocation fails. */
    t->hmask = 0;
    nilnode = &G(L)->nilnode;
    setmref(t->node, nilnode);
#if LJ_GC64
    setmref(t->freetop, nilnode);
#endif
    if (asize > 0) {
      if (asize > LJ_MAX_ASIZE)
	lj_err_msg(L, LJ_ERR_TABOV);
      setmref(t->array, lj_mem_newvec(L, asize, TValue));
      t->asize = asize;
    }
  }
  if (hbits)
    newhpart(L, t, hbits);
  return t;
}
  117. /* Create a new table.
  118. **
  119. ** IMPORTANT NOTE: The API differs from lua_createtable()!
  120. **
  121. ** The array size is non-inclusive. E.g. asize=128 creates array slots
  122. ** for 0..127, but not for 128. If you need slots 1..128, pass asize=129
  123. ** (slot 0 is wasted in this case).
  124. **
  125. ** The hash size is given in hash bits. hbits=0 means no hash part.
  126. ** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on.
  127. */
  128. GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
  129. {
  130. GCtab *t = newtab(L, asize, hbits);
  131. clearapart(t);
  132. if (t->hmask > 0) clearhpart(t);
  133. return t;
  134. }
  135. /* The API of this function conforms to lua_createtable(). */
  136. GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h)
  137. {
  138. return lj_tab_new(L, (uint32_t)(a > 0 ? a+1 : 0), hsize2hbits(h));
  139. }
  140. #if LJ_HASJIT
  141. GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
  142. {
  143. GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24);
  144. clearapart(t);
  145. if (t->hmask > 0) clearhpart(t);
  146. return t;
  147. }
  148. #endif
/* Duplicate a table (same array/hash geometry and contents as template kt).
** Hash chains are copied by adding the byte delta between the new and old
** node arrays to every next pointer, so the chain structure is preserved
** without rehashing.
*/
GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
{
  GCtab *t;
  uint32_t asize, hmask;
  t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
  lj_assertL(kt->asize == t->asize && kt->hmask == t->hmask,
	     "mismatched size of table and template");
  t->nomm = 0;  /* Keys with metamethod names may be present. */
  asize = kt->asize;
  if (asize > 0) {
    TValue *array = tvref(t->array);
    TValue *karray = tvref(kt->array);
    if (asize < 64) {  /* An inlined loop beats memcpy for < 512 bytes. */
      uint32_t i;
      for (i = 0; i < asize; i++)
	copyTV(L, &array[i], &karray[i]);
    } else {
      memcpy(array, karray, asize*sizeof(TValue));
    }
  }
  hmask = kt->hmask;
  if (hmask > 0) {
    uint32_t i;
    Node *node = noderef(t->node);
    Node *knode = noderef(kt->node);
    ptrdiff_t d = (char *)node - (char *)knode;  /* Relocation delta. */
    setfreetop(t, node, (Node *)((char *)getfreetop(kt, knode) + d));
    for (i = 0; i <= hmask; i++) {
      Node *kn = &knode[i];
      Node *n = &node[i];
      Node *next = nextnode(kn);
      /* Don't use copyTV here, since it asserts on a copy of a dead key. */
      n->val = kn->val; n->key = kn->key;
      /* Relocate the chain pointer by the same delta (NULL stays NULL). */
      setmref(n->next, next == NULL? next : (Node *)((char *)next + d));
    }
  }
  return t;
}
  188. /* Clear a table. */
  189. void LJ_FASTCALL lj_tab_clear(GCtab *t)
  190. {
  191. clearapart(t);
  192. if (t->hmask > 0) {
  193. Node *node = noderef(t->node);
  194. setfreetop(t, node, &node[t->hmask+1]);
  195. clearhpart(t);
  196. }
  197. }
/* Free a table and its owned parts.
** colo encoding: colo > 0 means the array is colocated inside the GCtab
** allocation; colo < 0 (top bit set by lj_tab_resize) means it was
** colocated but has been separated, so the array is freed on its own while
** the GCtab is still freed with its original colocated size (colo & 0x7f).
*/
void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
{
  if (t->hmask > 0)
    lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node);
  if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
    lj_mem_freevec(g, tvref(t->array), t->asize, TValue);
  if (LJ_MAX_COLOSIZE != 0 && t->colo)
    lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f));
  else
    lj_mem_freet(g, t);
}
/* -- Table resizing ------------------------------------------------------ */

/* Resize a table to fit the new array/hash part sizes.
** Order matters: grow the array first, then replace the hash part, then
** shrink the array (reinserting displaced values into the new hash part),
** and finally reinsert all live pairs from the old hash part.
** May throw on allocation failure or size overflow.
*/
void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
{
  Node *oldnode = noderef(t->node);  /* Kept alive until pairs reinserted. */
  uint32_t oldasize = t->asize;
  uint32_t oldhmask = t->hmask;
  if (asize > oldasize) {  /* Array part grows? */
    TValue *array;
    uint32_t i;
    if (asize > LJ_MAX_ASIZE)
      lj_err_msg(L, LJ_ERR_TABOV);
    if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) {
      /* A colocated array must be separated and copied. */
      TValue *oarray = tvref(t->array);
      array = lj_mem_newvec(L, asize, TValue);
      t->colo = (int8_t)(t->colo | 0x80);  /* Mark as separated (colo < 0). */
      for (i = 0; i < oldasize; i++)
	copyTV(L, &array[i], &oarray[i]);
    } else {
      array = (TValue *)lj_mem_realloc(L, tvref(t->array),
		  oldasize*sizeof(TValue), asize*sizeof(TValue));
    }
    setmref(t->array, array);
    t->asize = asize;
    for (i = oldasize; i < asize; i++)  /* Clear newly allocated slots. */
      setnilV(&array[i]);
  }
  /* Create new (empty) hash part. */
  if (hbits) {
    newhpart(L, t, hbits);
    clearhpart(t);
  } else {  /* No hash part: point at the shared nilnode sentinel. */
    global_State *g = G(L);
    setmref(t->node, &g->nilnode);
#if LJ_GC64
    setmref(t->freetop, &g->nilnode);
#endif
    t->hmask = 0;
  }
  if (asize < oldasize) {  /* Array part shrinks? */
    TValue *array = tvref(t->array);
    uint32_t i;
    t->asize = asize;  /* Note: This 'shrinks' even colocated arrays. */
    for (i = asize; i < oldasize; i++)  /* Reinsert old array values. */
      if (!tvisnil(&array[i]))
	copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]);
    /* Physically shrink only separated arrays. */
    if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
      setmref(t->array, lj_mem_realloc(L, array,
	      oldasize*sizeof(TValue), asize*sizeof(TValue)));
  }
  if (oldhmask > 0) {  /* Reinsert pairs from old hash part. */
    global_State *g;
    uint32_t i;
    for (i = 0; i <= oldhmask; i++) {
      Node *n = &oldnode[i];
      if (!tvisnil(&n->val))
	copyTV(L, lj_tab_set(L, t, &n->key), &n->val);
    }
    g = G(L);
    lj_mem_freevec(g, oldnode, oldhmask+1, Node);
  }
}
  274. static uint32_t countint(cTValue *key, uint32_t *bins)
  275. {
  276. lj_assertX(!tvisint(key), "bad integer key");
  277. if (tvisnum(key)) {
  278. lua_Number nk = numV(key);
  279. int32_t k = lj_num2int(nk);
  280. if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) {
  281. bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++;
  282. return 1;
  283. }
  284. }
  285. return 0;
  286. }
/* Count non-nil slots in the array part, distributing the counts into
** power-of-two bins (bins[b] covers indexes up to 2^(b+1)). Returns the
** total number of non-nil array slots.
*/
static uint32_t countarray(const GCtab *t, uint32_t *bins)
{
  uint32_t na, b, i;
  if (t->asize == 0) return 0;
  for (na = i = b = 0; b < LJ_MAX_ABITS; b++) {
    uint32_t n, top = 2u << b;  /* Upper index bound for this bin. */
    TValue *array;
    if (top >= t->asize) {
      top = t->asize-1;  /* Clamp last bin to the actual array size. */
      if (i > top)
	break;
    }
    array = tvref(t->array);
    for (n = 0; i <= top; i++)  /* i carries over across bins. */
      if (!tvisnil(&array[i]))
	n++;
    bins[b] += n;
    na += n;
  }
  return na;
}
  308. static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray)
  309. {
  310. uint32_t total, na, i, hmask = t->hmask;
  311. Node *node = noderef(t->node);
  312. for (total = na = 0, i = 0; i <= hmask; i++) {
  313. Node *n = &node[i];
  314. if (!tvisnil(&n->val)) {
  315. na += countint(&n->key, bins);
  316. total++;
  317. }
  318. }
  319. *narray += na;
  320. return total;
  321. }
  322. static uint32_t bestasize(uint32_t bins[], uint32_t *narray)
  323. {
  324. uint32_t b, sum, na = 0, sz = 0, nn = *narray;
  325. for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++)
  326. if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) {
  327. sz = (2u<<b)+1;
  328. na = sum;
  329. }
  330. *narray = sz;
  331. return na;
  332. }
/* Recompute the optimal array/hash split and resize the table.
** ek is the extra key about to be inserted (may be the nil sentinel when
** called from lj_tab_rehash). total counts all keys (+1 for ek) minus
** those absorbed by the new array part; the rest go to the hash part.
*/
static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
{
  uint32_t bins[LJ_MAX_ABITS];
  uint32_t total, asize, na, i;
  for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0;
  asize = countarray(t, bins);
  total = 1 + asize;  /* +1 accounts for the pending extra key. */
  total += counthash(t, bins, &asize);
  asize += countint(ek, bins);
  na = bestasize(bins, &asize);
  total -= na;  /* Keys absorbed by the array don't need hash slots. */
  lj_tab_resize(L, t, asize, hsize2hbits(total));
}
#if LJ_HASFFI
/* Force a rehash/resize of the table with no extra key (nil sentinel). */
void lj_tab_rehash(lua_State *L, GCtab *t)
{
  rehashtab(L, t, niltv(L));
}
#endif
  352. void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
  353. {
  354. lj_tab_resize(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
  355. }
  356. /* -- Table getters ------------------------------------------------------- */
  357. cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key)
  358. {
  359. TValue k;
  360. Node *n;
  361. k.n = (lua_Number)key;
  362. n = hashnum(t, &k);
  363. do {
  364. if (tvisnum(&n->key) && n->key.n == k.n)
  365. return &n->val;
  366. } while ((n = nextnode(n)));
  367. return NULL;
  368. }
  369. cTValue *lj_tab_getstr(GCtab *t, const GCstr *key)
  370. {
  371. Node *n = hashstr(t, key);
  372. do {
  373. if (tvisstr(&n->key) && strV(&n->key) == key)
  374. return &n->val;
  375. } while ((n = nextnode(n)));
  376. return NULL;
  377. }
/* Generic key lookup, dispatching on the key type.
** Never returns NULL: absent keys yield the canonical global nil value.
** Non-integral numbers fall through to the generic hash lookup via goto.
*/
cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key)
{
  if (tvisstr(key)) {
    cTValue *tv = lj_tab_getstr(t, strV(key));
    if (tv)
      return tv;
  } else if (tvisint(key)) {
    cTValue *tv = lj_tab_getint(t, intV(key));
    if (tv)
      return tv;
  } else if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if (nk == (lua_Number)k) {  /* Integral number? Use the integer path. */
      cTValue *tv = lj_tab_getint(t, k);
      if (tv)
	return tv;
    } else {
      goto genlookup;  /* Else use the generic lookup. */
    }
  } else if (!tvisnil(key)) {
    Node *n;
  genlookup:
    n = hashkey(t, key);
    do {
      if (lj_obj_equal(&n->key, key))
	return &n->val;
    } while ((n = nextnode(n)));
  }
  return niltv(L);  /* Shared immutable nil value. */
}
/* -- Table setters ------------------------------------------------------- */

/* Insert new key. Use Brent's variation to optimize the chain length.
** If the main position is occupied, a free node is claimed by scanning
** downward from the free-top pointer. If the occupant belongs in this
** main position it is chained through the free node; otherwise the
** occupant is evicted to the free node and its chain is relinked.
** Runs a GC write barrier and returns the (nil) value slot for the key.
*/
TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
{
  Node *n = hashkey(t, key);
  if (!tvisnil(&n->val) || t->hmask == 0) {
    Node *nodebase = noderef(t->node);
    Node *collide, *freenode = getfreetop(t, nodebase);
    lj_assertL(freenode >= nodebase && freenode <= nodebase+t->hmask+1,
	       "bad freenode");
    do {
      if (freenode == nodebase) {  /* No free node found? */
	rehashtab(L, t, key);  /* Rehash table. */
	return lj_tab_set(L, t, key);  /* Retry key insertion. */
      }
    } while (!tvisnil(&(--freenode)->key));
    setfreetop(t, nodebase, freenode);
    lj_assertL(freenode != &G(L)->nilnode, "store to fallback hash");
    collide = hashkey(t, &n->key);  /* Main position of the occupant. */
    if (collide != n) {  /* Colliding node not the main node? */
      while (noderef(collide->next) != n)  /* Find predecessor. */
	collide = nextnode(collide);
      setmref(collide->next, freenode);  /* Relink chain. */
      /* Copy colliding node into free node and free main node. */
      freenode->val = n->val;
      freenode->key = n->key;
      freenode->next = n->next;
      setmref(n->next, NULL);
      setnilV(&n->val);
      /* Rechain pseudo-resurrected string keys with colliding hashes. */
      while (nextnode(freenode)) {
	Node *nn = nextnode(freenode);
	if (!tvisnil(&nn->val) && hashkey(t, &nn->key) == n) {
	  freenode->next = nn->next;
	  nn->next = n->next;
	  setmref(n->next, nn);
	  /*
	  ** Rechaining a resurrected string key creates a new dilemma:
	  ** Another string key may have originally been resurrected via
	  ** _any_ of the previous nodes as a chain anchor. Including
	  ** a node that had to be moved, which makes them unreachable.
	  ** It's not feasible to check for all previous nodes, so rechain
	  ** any string key that's currently in a non-main positions.
	  */
	  while ((nn = nextnode(freenode))) {
	    if (!tvisnil(&nn->val)) {
	      Node *mn = hashkey(t, &nn->key);
	      if (mn != freenode && mn != nn) {  /* Move to main chain. */
		freenode->next = nn->next;
		nn->next = mn->next;
		setmref(mn->next, nn);
	      } else {
		freenode = nn;
	      }
	    } else {
	      freenode = nn;
	    }
	  }
	  break;
	} else {
	  freenode = nn;
	}
      }
    } else {  /* Otherwise use free node. */
      setmrefr(freenode->next, n->next);  /* Insert into chain. */
      setmref(n->next, freenode);
      n = freenode;
    }
  }
  n->key.u64 = key->u64;
  if (LJ_UNLIKELY(tvismzero(&n->key)))
    n->key.u64 = 0;  /* Canonicalize -0 keys to +0. */
  lj_gc_anybarriert(L, t);
  lj_assertL(tvisnil(&n->val), "new hash slot is not empty");
  return &n->val;
}
  485. TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key)
  486. {
  487. TValue k;
  488. Node *n;
  489. k.n = (lua_Number)key;
  490. n = hashnum(t, &k);
  491. do {
  492. if (tvisnum(&n->key) && n->key.n == k.n)
  493. return &n->val;
  494. } while ((n = nextnode(n)));
  495. return lj_tab_newkey(L, t, &k);
  496. }
  497. TValue *lj_tab_setstr(lua_State *L, GCtab *t, const GCstr *key)
  498. {
  499. TValue k;
  500. Node *n = hashstr(t, key);
  501. do {
  502. if (tvisstr(&n->key) && strV(&n->key) == key)
  503. return &n->val;
  504. } while ((n = nextnode(n)));
  505. setstrV(L, &k, key);
  506. return lj_tab_newkey(L, t, &k);
  507. }
/* Generic setter: get or create the value slot for any key.
** Errors on nil and NaN keys. Integral numbers take the integer path;
** other keys fall through to the generic hash lookup/insert.
*/
TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key)
{
  Node *n;
  t->nomm = 0;  /* Invalidate negative metamethod cache. */
  if (tvisstr(key)) {
    return lj_tab_setstr(L, t, strV(key));
  } else if (tvisint(key)) {
    return lj_tab_setint(L, t, intV(key));
  } else if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if (nk == (lua_Number)k)
      return lj_tab_setint(L, t, k);
    if (tvisnan(key))
      lj_err_msg(L, LJ_ERR_NANIDX);  /* NaN can never be looked up again. */
    /* Else use the generic lookup. */
  } else if (tvisnil(key)) {
    lj_err_msg(L, LJ_ERR_NILIDX);
  }
  n = hashkey(t, key);
  do {
    if (lj_obj_equal(&n->key, key))
      return &n->val;
  } while ((n = nextnode(n)));
  return lj_tab_newkey(L, t, key);
}
/* -- Table traversal ----------------------------------------------------- */

/* Table traversal indexes:
**
** Array key index: [0 .. t->asize-1]
** Hash key index:  [t->asize .. t->asize+t->hmask]
** Invalid key:     ~0
*/

/* Get the successor traversal index of a key.
** Integer keys outside the array part are re-boxed as number keys before
** the hash lookup. A key tagged LJ_KEYINDEX carries its own traversal
** index (used when a specialized ITERN loop is despecialized mid-run).
*/
uint32_t LJ_FASTCALL lj_tab_keyindex(GCtab *t, cTValue *key)
{
  TValue tmp;
  if (tvisint(key)) {
    int32_t k = intV(key);
    if ((uint32_t)k < t->asize)
      return (uint32_t)k + 1;  /* Successor inside the array part. */
    setnumV(&tmp, (lua_Number)k);  /* Hash part stores number keys. */
    key = &tmp;
  } else if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if ((uint32_t)k < t->asize && nk == (lua_Number)k)
      return (uint32_t)k + 1;
  }
  if (!tvisnil(key)) {
    Node *n = hashkey(t, key);
    do {
      if (lj_obj_equal(&n->key, key))
	return t->asize + (uint32_t)((n+1) - noderef(t->node));
    } while ((n = nextnode(n)));
    if (key->u32.hi == LJ_KEYINDEX)  /* Despecialized ITERN while running. */
      return key->u32.lo;
    return ~0u;  /* Invalid key to next. */
  }
  return 0;  /* A nil key starts the traversal. */
}
/* Get the next key/value pair of a table traversal.
** Writes the key to o[0] and the value to o[1].
** Returns 1 on success, 0 at the end of the traversal, -1 for an invalid
** key (index ~0 from lj_tab_keyindex wraps negative after the subtractions).
*/
int lj_tab_next(GCtab *t, cTValue *key, TValue *o)
{
  uint32_t idx = lj_tab_keyindex(t, key);  /* Find successor index of key. */
  /* First traverse the array part. */
  for (; idx < t->asize; idx++) {
    cTValue *a = arrayslot(t, idx);
    if (LJ_LIKELY(!tvisnil(a))) {
      setintV(o, idx);
      o[1] = *a;
      return 1;
    }
  }
  idx -= t->asize;  /* Rebase to hash part indexes. */
  /* Then traverse the hash part. */
  for (; idx <= t->hmask; idx++) {
    Node *n = &noderef(t->node)[idx];
    if (!tvisnil(&n->val)) {
      o[0] = n->key;
      o[1] = n->val;
      return 1;
    }
  }
  return (int32_t)idx < 0 ? -1 : 0;  /* Invalid key or end of traversal. */
}
/* -- Table length calculation -------------------------------------------- */

/* Compute table length. Slow path with mixed array/hash lookups.
** hi is the last array index checked by the fast path. Doubles hi until a
** nil is found (falling back to a linear scan near INT_MAX), then binary
** searches for a non-nil to nil transition, i.e. a valid Lua border.
*/
LJ_NOINLINE static MSize tab_len_slow(GCtab *t, size_t hi)
{
  cTValue *tv;
  size_t lo = hi;
  hi++;
  /* Widening search for an upper bound. */
  while ((tv = lj_tab_getint(t, (int32_t)hi)) && !tvisnil(tv)) {
    lo = hi;
    hi += hi;  /* Double the probe index. */
    if (hi > (size_t)(INT_MAX-2)) {  /* Punt and do a linear search. */
      lo = 1;
      while ((tv = lj_tab_getint(t, (int32_t)lo)) && !tvisnil(tv)) lo++;
      return (MSize)(lo - 1);
    }
  }
  /* Binary search to find a non-nil to nil transition. */
  while (hi - lo > 1) {
    size_t mid = (lo+hi) >> 1;
    cTValue *tvb = lj_tab_getint(t, (int32_t)mid);
    if (tvb && !tvisnil(tvb)) lo = mid; else hi = mid;
  }
  return (MSize)lo;
}
/* Compute table length (a Lua border). Fast path.
** If the last array slot is nil, a border must exist inside the array and
** a binary search finds it. Otherwise the hash part (if any) must be
** consulted via the slow path.
*/
MSize LJ_FASTCALL lj_tab_len(GCtab *t)
{
  size_t hi = (size_t)t->asize;
  if (hi) hi--;  /* Index of the last array slot. */
  /* In a growing array the last array element is very likely nil. */
  if (hi > 0 && LJ_LIKELY(tvisnil(arrayslot(t, hi)))) {
    /* Binary search to find a non-nil to nil transition in the array. */
    size_t lo = 0;
    while (hi - lo > 1) {
      size_t mid = (lo+hi) >> 1;
      if (tvisnil(arrayslot(t, mid))) hi = mid; else lo = mid;
    }
    return (MSize)lo;
  }
  /* Without a hash part, there's an implicit nil after the last element. */
  return t->hmask ? tab_len_slow(t, hi) : (MSize)hi;
}
#if LJ_HASJIT
/* Verify hinted table length or compute it.
** The hint is valid if slot[hint] is non-nil and slot[hint+1] is nil
** (or hint is the last array slot and there is no hash part).
** NOTE(review): tv's address is computed before the bounds checks; it is
** only dereferenced on paths where hint is within the array — presumably
** callers never pass a wildly out-of-range hint. Verify against JIT callers.
*/
MSize LJ_FASTCALL lj_tab_len_hint(GCtab *t, size_t hint)
{
  size_t asize = (size_t)t->asize;
  cTValue *tv = arrayslot(t, hint);
  if (LJ_LIKELY(hint+1 < asize)) {
    if (LJ_LIKELY(!tvisnil(tv) && tvisnil(tv+1))) return (MSize)hint;
  } else if (hint+1 <= asize && LJ_LIKELY(t->hmask == 0) && !tvisnil(tv)) {
    return (MSize)hint;
  }
  return lj_tab_len(t);
}
#endif