/*
** Memory access optimizations.
** AA: Alias Analysis using high-level semantic disambiguation.
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
** DSE: Dead-Store Elimination.
** Copyright (C) 2005-2009 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_opt_mem_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_tab.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)   (&J->cur.ir[(ref)])
#define fins      (&J->fold.ins)

/*
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
*/

/* Return values from alias analysis. */
typedef enum {
  ALIAS_NO,   /* The two refs CANNOT alias (exact). */
  ALIAS_MAY,  /* The two refs MAY alias (inexact). */
  ALIAS_MUST  /* The two refs MUST alias (exact). */
} AliasRet;
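
/* Reading guide (informal summary of how the passes below use these values):
** ALIAS_NO lets a search continue past the other access, ALIAS_MUST enables
** store-to-load forwarding resp. elimination of a redundant store, and
** ALIAS_MAY conservatively stops or limits the search.
*/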

/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */

/* Alias analysis for array and hash access using key-based disambiguation. */
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRRef ka = refa->op2;
  IRRef kb = refb->op2;
  IRIns *keya, *keyb;
  if (refa == refb)
    return ALIAS_MUST;  /* Shortcut for same refs. */
  keya = IR(ka);
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
  keyb = IR(kb);
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
  if (ka == kb) {
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
    IRIns *ta = refa;
    IRIns *tb = refb;
    if (ta->o == IR_HREFK || ta->o == IR_AREF) ta = IR(ta->op1);
    if (tb->o == IR_HREFK || tb->o == IR_AREF) tb = IR(tb->op1);
    if (ta->op1 == tb->op1)
      return ALIAS_MUST;  /* Same key, same table. */
    else
      return ALIAS_MAY;  /* Same key, possibly different table. */
  }
  if (irref_isk(ka) && irref_isk(kb))
    return ALIAS_NO;  /* Different constant keys. */
  if (refa->o == IR_AREF) {
    /* Disambiguate array references based on index arithmetic. */
    lua_assert(refb->o == IR_AREF);
    if (refa->op1 == refb->op1) {
      /* Same table, different non-const array keys. */
      int32_t ofsa = 0, ofsb = 0;
      IRRef basea = ka, baseb = kb;
      /* Gather base and offset from t[base] or t[base+-ofs]. */
      if (keya->o == IR_ADD && irref_isk(keya->op2)) {
        basea = keya->op1;
        ofsa = IR(keya->op2)->i;
        if (basea == kb && ofsa != 0)
          return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
      }
      if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
        baseb = keyb->op1;
        ofsb = IR(keyb->op2)->i;
        if (ka == baseb && ofsb != 0)
          return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
      }
      if (basea == baseb && ofsa != ofsb)
        return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
    }
  } else {
    /* Disambiguate hash references based on the type of their keys. */
    lua_assert((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
               (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF));
    if (!irt_sametype(keya->t, keyb->t))
      return ALIAS_NO;  /* Different key types. */
  }
  return ALIAS_MAY;  /* Anything else: we just don't know. */
}
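
/* Informal examples of the disambiguation above (not exhaustive):
** - AREFs on the same table with keys i+1 and i+2 share base i but have
**   different constant offsets, so they CANNOT alias (ALIAS_NO).
** - An HREF with a string key and an HREF with a number key CANNOT alias.
** - The same non-constant key used with two different table refs only
**   yields ALIAS_MAY, since the tables might be the same object.
*/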

/* Array and hash load forwarding. */
static TRef fwd_ahload(jit_State *J, IRRef xref)
{
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[fins->o+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto conflict;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store (yet): const-fold loads from allocations. */
  {
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
    IRRef tab = ir->op1;
    ir = IR(tab);
    if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
      /* A NEWREF with a number key may end up pointing to the array part.
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
      ** For now simply consider this a conflict without forwarding anything.
      */
      if (xr->o == IR_AREF) {
        IRRef ref2 = J->chain[IR_NEWREF];
        while (ref2 > tab) {
          IRIns *newref = IR(ref2);
          if (irt_isnum(IR(newref->op2)->t))
            goto conflict;
          ref2 = newref->prev;
        }
      }
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
      ** But the above search for conflicting stores was limited by xref.
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
      ** is ok, too. A conflict does NOT limit the search for a matching load.
      */
      while (ref > tab) {
        IRIns *store = IR(ref);
        switch (aa_ahref(J, xr, IR(store->op1))) {
        case ALIAS_NO:   break;  /* Continue searching. */
        case ALIAS_MAY:  goto conflict;  /* Conflicting store. */
        case ALIAS_MUST: return store->op2;  /* Store forwarding. */
        }
        ref = store->prev;
      }
      lua_assert(ir->o != IR_TNEW || irt_isnil(fins->t));
      if (irt_ispri(fins->t)) {
        return TREF_PRI(irt_type(fins->t));
      } else if (irt_isnum(fins->t) || irt_isstr(fins->t)) {
        TValue keyv;
        cTValue *tv;
        IRIns *key = IR(xr->op2);
        if (key->o == IR_KSLOT) key = IR(key->op1);
        lj_ir_kvalue(J->L, &keyv, key);
        tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
        lua_assert(itype2irt(tv) == irt_type(fins->t));
        if (irt_isnum(fins->t))
          return lj_ir_knum_nn(J, tv->u64);
        else
          return lj_ir_kstr(J, strV(tv));
      }
      /* Otherwise: don't intern as a constant. */
    }
  }

conflict:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[fins->o];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == xref)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return 0;  /* Conflict or no match. */
}
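
/* Informal summary of fwd_ahload's possible results: the value of an
** earlier store to the same slot (store-to-load forwarding), a constant
** when the table comes from TNEW/TDUP and no aliasing store intervenes
** (e.g. `local t = {}; local x = t.f` yields nil), an earlier identical
** load (load-to-load forwarding), or 0 when the caller must emit the load.
*/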

/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
static TRef fwd_aload_reassoc(jit_State *J)
{
  IRIns *irx = IR(fins->op1);
  IRIns *key = IR(irx->op2);
  if (key->o == IR_ADD && irref_isk(key->op2)) {
    IRIns *add2 = IR(key->op1);
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
        IR(key->op2)->i == -IR(add2->op2)->i) {
      IRRef ref = J->chain[IR_AREF];
      IRRef lim = add2->op1;
      if (irx->op1 > lim) lim = irx->op1;
      while (ref > lim) {
        IRIns *ir = IR(ref);
        if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
          return fwd_ahload(J, ref);
        ref = ir->prev;
      }
    }
  }
  return 0;
}
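
/* Informal example: in `for i=2,n do t[i] = t[i-1] end` the load in the
** next (unrolled) iteration uses the key (i+1)-1. The matching +1/-1 pair
** is stripped above and an existing AREF with plain key i -- the slot just
** stored to -- is searched for, so the stored value can be forwarded.
*/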

/* ALOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
{
  IRRef ref;
  if ((ref = fwd_ahload(J, fins->op1)) ||
      (ref = fwd_aload_reassoc(J)))
    return ref;
  return EMITFOLD;
}

/* HLOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
{
  IRRef ref = fwd_ahload(J, fins->op1);
  if (ref)
    return ref;
  return EMITFOLD;
}

/* ASTORE/HSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[fins->o];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:  /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:  /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;  /* Unchained NOP -- does anybody care? */
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
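
/* Informal example: for `t[k] = 1; t[k] = 2` the first store is dead and
** is replaced with a NOP, provided no guarded instruction (and thus no
** potentially conflicting load) lies between the two stores -- a guard may
** side-exit to the interpreter, which would still observe the first store.
** If both stores write the same value, the second store is dropped instead.
*/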

/* -- ULOAD forwarding ---------------------------------------------------- */

/* The current alias analysis for upvalues is very simplistic. It only
** disambiguates between the unique upvalues of the same function.
** This is good enough for now, since most upvalues are read-only.
**
** A more precise analysis would be feasible with the help of the parser:
** generate a unique key for every upvalue, even across all prototypes.
** Lacking a realistic use-case, it's unclear whether this is beneficial.
*/
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
{
  if (refa->o != refb->o)
    return ALIAS_NO;  /* Different UREFx type. */
  if (refa->op1 != refb->op1)
    return ALIAS_MAY;  /* Different function. */
  else if (refa->op2 == refb->op2)
    return ALIAS_MUST;  /* Same function, same upvalue idx. */
  else
    return ALIAS_NO;  /* Same function, different upvalue idx. */
}
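
/* E.g. two upvalue refs of the same kind on the same function with the same
** upvalue index MUST alias; with different indices they CANNOT alias.
** Refs on different functions only yield ALIAS_MAY, since two closures may
** share an upvalue.
*/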

/* ULOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
{
  IRRef uref = fins->op1;
  IRRef lim = uref;  /* Search limit. */
  IRIns *xr = IR(uref);
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_USTORE];
  while (ref > uref) {
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto conflict;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

conflict:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_ULOAD];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == uref)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return EMITFOLD;  /* Conflict or no match. */
}

/* USTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[IR_USTORE];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:  /* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:  /* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards (includes conflicting loads). */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;  /* Unchained NOP -- does anybody care? */
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */

/* Alias analysis for field access.
** Field loads are cheap and field stores are rare.
** Simple disambiguation based on field types is good enough.
*/
static AliasRet aa_fref(IRIns *refa, IRIns *refb)
{
  if (refa->op2 != refb->op2)
    return ALIAS_NO;  /* Different fields. */
  if (refa->op1 == refb->op1)
    return ALIAS_MUST;  /* Same field, same object. */
  else
    return ALIAS_MAY;  /* Same field, possibly different object. */
}
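
/* E.g. accesses to two different field IDs never alias; accesses to the
** same field ID on possibly different objects only yield ALIAS_MAY.
*/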

/* Only the loads for mutable fields end up here (see FOLD). */
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
{
  IRRef oref = fins->op1;  /* Object reference. */
  IRRef fid = fins->op2;  /* Field ID. */
  IRRef lim = oref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_FSTORE];
  while (ref > oref) {
    IRIns *store = IR(ref);
    switch (aa_fref(fins, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto conflict;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store: const-fold field loads from allocations. */
  if (fid == IRFL_TAB_META) {
    IRIns *ir = IR(oref);
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
      return lj_ir_knull(J, IRT_TAB);
  }

conflict:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_FLOAD];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == oref && load->op2 == fid)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
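
/* Informal example of the const-fold above: a table freshly allocated by
** TNEW/TDUP has no metatable, so an FLOAD of its metatable field folds to a
** NULL constant as long as no FSTORE to that field intervenes.
*/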

/* FSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
{
  IRRef fref = fins->op1;  /* FREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(fref);
  IRRef1 *refp = &J->chain[IR_FSTORE];
  IRRef ref = *refp;
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_fref(xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
        goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
        return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
        IRIns *ir;
        /* Check for any intervening guards or conflicting loads. */
        for (ir = IR(J->cur.nins-1); ir > store; ir--)
          if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
            goto doemit;  /* No elimination possible. */
        /* Remove redundant store from chain and replace with NOP. */
        *refp = store->prev;
        store->o = IR_NOP;  /* Unchained NOP -- does anybody care? */
        store->t.irt = IRT_NIL;
        store->op1 = store->op2 = 0;
        store->prev = 0;
        /* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* -- TLEN forwarding ----------------------------------------------------- */

/* This is rather simplistic right now, but better than nothing. */
TRef LJ_FASTCALL lj_opt_fwd_tlen(jit_State *J)
{
  IRRef tab = fins->op1;  /* Table reference. */
  IRRef lim = tab;  /* Search limit. */
  IRRef ref;

  /* Any ASTORE is a conflict and limits the search. */
  if (J->chain[IR_ASTORE] > lim) lim = J->chain[IR_ASTORE];

  /* Search for conflicting HSTORE with numeric key. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    IRIns *href = IR(store->op1);
    IRIns *key = IR(href->op2);
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
      lim = ref;  /* Conflicting store found, limits search for TLEN. */
      break;
    }
    ref = store->prev;
  }

  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_TLEN];
  while (ref > lim) {
    IRIns *tlen = IR(ref);
    if (tlen->op1 == tab)
      return ref;  /* Load forwarding. */
    ref = tlen->prev;
  }
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}
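
/* Informal example: two evaluations of `#t` with no array store and no hash
** store with a numeric key in between are folded to the same TLEN result;
** any such store conservatively limits the search, since it may change the
** table length.
*/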

/* -- ASTORE/HSTORE previous type analysis -------------------------------- */

/* Check whether the previous value for a table store is non-nil.
** This can be derived either from a previous store or from a previous
** load (because all loads from tables perform a type check).
**
** The result of the analysis can be used to avoid the metatable check
** and the guard against HREF returning niltv. Both of these are cheap,
** so let's not spend too much effort on the analysis.
**
** A result of 1 is exact: previous value CANNOT be nil.
** A result of 0 is inexact: previous value MAY be nil.
*/
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
{
  /* First check stores. */
  IRRef ref = J->chain[loadop+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    if (store->op1 == xref) {  /* Same xREF. */
      /* A nil store MAY alias, but a non-nil store MUST alias. */
      return !irt_isnil(store->t);
    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
      IRRef skref = IR(store->op1)->op2;
      IRRef xkref = IR(xref)->op2;
      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
        if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
          return 0;  /* A nil store with same const key or var key MAY alias. */
        /* Different const keys CANNOT alias. */
      }  /* Different key types CANNOT alias. */
    }  /* Other non-nil stores MAY alias. */
    ref = store->prev;
  }

  /* Check loads since nothing could be derived from stores. */
  ref = J->chain[loadop];
  while (ref > xref) {
    IRIns *load = IR(ref);
    if (load->op1 == xref) {  /* Same xREF. */
      /* A nil load MAY alias, but a non-nil load MUST alias. */
      return !irt_isnil(load->t);
    }  /* Other non-nil loads MAY alias. */
    ref = load->prev;
  }
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
}
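
/* Informal example: when recording `t[k] = t[k] + 1`, the guarded load of
** t[k] proves the previous slot value was a (non-nil) number, so this
** analysis returns 1 and the store may omit the metatable check and the
** niltv guard.
*/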

/* ------------------------------------------------------------------------ */

#undef IR
#undef fins

#endif