
x64: Don't fuse implicitly 32-to-64 extended operands.
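On x86-64, writing a 32-bit register implicitly zero-extends the result to 64 bits, but a memory operand fused into a 64-bit (REX.W) instruction reads the full 64 bits from memory, so that implicit extension never happens and the upper half comes from whatever sits next to the value. The new asm_fuseloadm helper below therefore falls back to a plain register allocation (ra_alloc1) whenever a 64-bit operation would otherwise fuse a 32-bit value. A minimal standalone sketch of the difference, not LuaJIT code; the 8-byte slot, its stale upper half, and the via_* names are assumptions for illustration:

/* Minimal standalone sketch (not LuaJIT code): why a 64-bit operation must
** not fuse a 32-bit value as a memory operand. Assumes little-endian x86-64
** and a hypothetical 8-byte slot whose upper half holds stale data.
*/
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  union { uint64_t u64; uint32_t u32[2]; } slot;
  slot.u32[0] = 5;           /* the live 32-bit value */
  slot.u32[1] = 0xdeadbeef;  /* stale bytes sitting above it */

  /* Register path: a 32-bit load (mov r32, [mem]) zero-extends to 64 bits. */
  uint64_t via_register = (uint64_t)slot.u32[0];   /* 0x0000000000000005 */

  /* Fused path: a REX.W instruction with a memory operand reads all 8 bytes,
  ** so the stale upper half leaks into the 64-bit result.
  */
  uint64_t via_fused_operand = slot.u64;           /* 0xdeadbeef00000005 */

  printf("register: %016llx  fused: %016llx\n",
         (unsigned long long)via_register,
         (unsigned long long)via_fused_operand);
  return 0;
}

Each call site in the diff passes whether the enclosing instruction operates on 64 bits (st64 in the FP-conversion path, irt_is64(ir->t) in the integer-arithmetic paths, r64 in the comparison paths), so only genuinely 64-bit operations give up the fusion.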

Mike Pall, 12 years ago · commit 1cd13f6b33
1 changed file with 18 additions and 6 deletions

src/lj_asm_x86.h  +18 −6

@@ -367,6 +367,18 @@ static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
   return ra_allocref(as, ref, allow);
 }
 
+#if LJ_64
+/* Don't fuse a 32 bit load into a 64 bit operation. */
+static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
+{
+  if (is64 && !irt_is64(IR(ref)->t))
+    return ra_alloc1(as, ref, allow);
+  return asm_fuseload(as, ref, allow);
+}
+#else
+#define asm_fuseloadm(as, ref, allow, is64)  asm_fuseload(as, (ref), (allow))
+#endif
+
 /* -- Calls --------------------------------------------------------------- */
 
 /* Count the required number of stack slots for a call. */
@@ -696,7 +708,7 @@ static void asm_conv(ASMState *as, IRIns *ir)
     } else {  /* Integer to FP conversion. */
       Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
 		 ra_alloc1(as, lref, RSET_GPR) :
-		 asm_fuseload(as, lref, RSET_GPR);
+		 asm_fuseloadm(as, lref, RSET_GPR, st64);
       if (LJ_64 && st == IRT_U64) {
 	MCLabel l_end = emit_label(as);
 	const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
@@ -1829,7 +1841,7 @@ static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
     if (asm_swapops(as, ir)) {
       IRRef tmp = lref; lref = rref; rref = tmp;
     }
-    right = asm_fuseload(as, rref, rset_clear(allow, dest));
+    right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
   }
   if (irt_isguard(ir->t))  /* For IR_ADDOV etc. */
     asm_guardcc(as, CC_O);
@@ -1842,7 +1854,7 @@ static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
     emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
   } else {  /* IMUL r, r, k. */
     /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
-    Reg left = asm_fuseload(as, lref, RSET_GPR);
+    Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
     x86Op xo;
     if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
     } else { emit_i32(as, k); xo = XO_IMULi; }
@@ -2109,7 +2121,7 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
 	  }
 	}
 	as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
-	right = asm_fuseload(as, irl->op1, allow);
+	right = asm_fuseloadm(as, irl->op1, allow, r64);
 	as->curins++;  /* Undo the above. */
       test_nofuse:
 	asm_guardcc(as, cc);
@@ -2146,7 +2158,7 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
 	    return;
 	  }  /* Otherwise handle register case as usual. */
 	} else {
-	  left = asm_fuseload(as, lref, RSET_GPR);
+	  left = asm_fuseloadm(as, lref, RSET_GPR, r64);
 	}
 	asm_guardcc(as, cc);
 	if (usetest && left != RID_MRM) {
@@ -2160,7 +2172,7 @@ static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
       }
     } else {
       Reg left = ra_alloc1(as, lref, RSET_GPR);
-      Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left));
+      Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
       asm_guardcc(as, cc);
       emit_mrm(as, XO_CMP, r64 + left, right);
     }