
Improve FOLD/CSE of field loads and array/hash refs across NEWREF.

Mike Pall 15 years ago
parent
commit
38628d93b8
3 changed files with 23 additions and 15 deletions
  1. src/lj_iropt.h (+1 -0)
  2. src/lj_opt_fold.c (+8 -15)
  3. src/lj_opt_mem.c (+14 -0)

src/lj_iropt.h (+1 -0)

@@ -111,6 +111,7 @@ LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
 LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
 LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J);
 LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
 LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
 
 /* Dead-store elimination. */

src/lj_opt_fold.c (+8 -15)

@@ -1233,23 +1233,15 @@ LJFOLDF(fwd_href_tdup)
 }
 
 /* We can safely FOLD/CSE array/hash refs and field loads, since there
-** are no corresponding stores. But NEWREF may invalidate all of them.
-** Lacking better disambiguation for table references, these optimizations
-** are simply disabled across any NEWREF.
+** are no corresponding stores. But we need to check for any NEWREF with
+** an aliased table, as it may invalidate all of the pointers and fields.
 ** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
 ** FLOADs. And NEWREF itself is treated like a store (see below).
 */
-LJFOLD(HREF any any)
-LJFOLDF(cse_href)
-{
-  TRef tr = lj_opt_cse(J);
-  return tref_ref(tr) < J->chain[IR_NEWREF] ? EMITFOLD : tr;
-}
-
 LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
 LJFOLDF(fload_tab_tnew_asize)
 {
-  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fins->op1 > J->chain[IR_NEWREF])
+  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
     return INTFOLD(fleft->op1);
   return NEXTFOLD;
 }
@@ -1257,7 +1249,7 @@ LJFOLDF(fload_tab_tnew_asize)
 LJFOLD(FLOAD TNEW IRFL_TAB_HMASK)
 LJFOLDF(fload_tab_tnew_hmask)
 {
-  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fins->op1 > J->chain[IR_NEWREF])
+  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
     return INTFOLD((1 << fleft->op2)-1);
   return NEXTFOLD;
 }
@@ -1265,7 +1257,7 @@ LJFOLDF(fload_tab_tnew_hmask)
 LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE)
 LJFOLDF(fload_tab_tdup_asize)
 {
-  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fins->op1 > J->chain[IR_NEWREF])
+  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
     return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize);
   return NEXTFOLD;
 }
@@ -1273,11 +1265,12 @@ LJFOLDF(fload_tab_tdup_asize)
 LJFOLD(FLOAD TDUP IRFL_TAB_HMASK)
 LJFOLDF(fload_tab_tdup_hmask)
 {
-  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fins->op1 > J->chain[IR_NEWREF])
+  if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
     return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask);
   return NEXTFOLD;
 }
 
+LJFOLD(HREF any any)
 LJFOLD(FLOAD any IRFL_TAB_ARRAY)
 LJFOLD(FLOAD any IRFL_TAB_NODE)
 LJFOLD(FLOAD any IRFL_TAB_ASIZE)
@@ -1285,7 +1278,7 @@ LJFOLD(FLOAD any IRFL_TAB_HMASK)
 LJFOLDF(fload_tab_ah)
 {
   TRef tr = lj_opt_cse(J);
-  return tref_ref(tr) < J->chain[IR_NEWREF] ? EMITFOLD : tr;
+  return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD;
 }
 
 /* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */
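
The handlers above all delegate the NEWREF check to the new lj_opt_fwd_tptr() helper (added in src/lj_opt_mem.c below), but the limit they pass differs: the TNEW/TDUP folds pass the table-creating instruction itself (fins->op1), so every NEWREF emitted after the table was created is inspected, while the combined HREF/FLOAD CSE rule passes the CSE candidate (tref_ref(tr)), so only NEWREFs emitted after that candidate can block its reuse.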

src/lj_opt_mem.c (+14 -0)

@@ -277,6 +277,20 @@ int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
   return 1;  /* No conflict. Can fold to niltv. */
 }
 
+/* Check whether there's no aliasing NEWREF for the left operand. */
+int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
+{
+  IRRef ta = fins->op1;
+  IRRef ref = J->chain[IR_NEWREF];
+  while (ref > lim) {
+    IRIns *newref = IR(ref);
+    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
+      return 0;  /* Conflict. */
+    ref = newref->prev;
+  }
+  return 1;  /* No conflict. Can safely FOLD/CSE. */
+}
+
 /* ASTORE/HSTORE elimination. */
 TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
 {
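
To make the new check concrete, here is a small standalone C model of the chain walk above (an illustrative sketch only; NewRef, fwd_tptr and the hard-coded table/IR numbers are made up and do not appear in the LuaJIT sources). Instead of giving up whenever any NEWREF is younger than the limit, a conflict is reported only when a NEWREF's table may alias the table being accessed; the model uses plain equality where the real code also consults aa_table().

/* Illustrative model, not the LuaJIT sources. */
#include <stdio.h>

typedef struct NewRef {
  int ref;              /* IR position of this NEWREF. */
  int tab;              /* Table the NEWREF was emitted for. */
  struct NewRef *prev;  /* Next-older NEWREF in the chain. */
} NewRef;

/* Return 1 if no NEWREF younger than 'lim' may alias table 'ta',
** i.e. an earlier ref or load on 'ta' can safely be reused. */
static int fwd_tptr(int ta, int lim, const NewRef *chain)
{
  while (chain && chain->ref > lim) {
    if (ta == chain->tab)   /* Crude stand-in for the aa_table() test. */
      return 0;             /* Conflict: give up on FOLD/CSE. */
    chain = chain->prev;
  }
  return 1;                 /* No conflict: safe to FOLD/CSE. */
}

int main(void)
{
  NewRef n = { 5, 3, NULL };           /* IR 0005: NEWREF on the table from IR 0003. */
  printf("%d\n", fwd_tptr(1, 1, &n));  /* Unrelated table: still foldable (1). */
  printf("%d\n", fwd_tptr(3, 3, &n));  /* Same table: conflict (0). */
  return 0;
}

Before this commit, both lookups would have been handled like the second one, since any NEWREF younger than the limit disabled the optimization regardless of which table it touched. The real check stays conservative: aa_table() must report ALIAS_NO for the walk to continue, otherwise the fold or CSE opportunity is abandoned.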