InstCombineShifts.cpp 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830
  1. //===- InstCombineShifts.cpp ----------------------------------------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file implements the visitShl, visitLShr, and visitAShr functions.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "InstCombineInternal.h"
  14. #include "llvm/Analysis/ConstantFolding.h"
  15. #include "llvm/Analysis/InstructionSimplify.h"
  16. #include "llvm/IR/IntrinsicInst.h"
  17. #include "llvm/IR/PatternMatch.h"
  18. using namespace llvm;
  19. using namespace PatternMatch;
  20. #define DEBUG_TYPE "instcombine"
  21. Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
  22. assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
  23. Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  24. // See if we can fold away this shift.
  25. if (SimplifyDemandedInstructionBits(I))
  26. return &I;
  27. // Try to fold constant and into select arguments.
  28. if (isa<Constant>(Op0))
  29. if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
  30. if (Instruction *R = FoldOpIntoSelect(I, SI))
  31. return R;
  32. if (Constant *CUI = dyn_cast<Constant>(Op1))
  33. if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
  34. return Res;
  35. // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
  36. // Because shifts by negative values (which could occur if A were negative)
  37. // are undefined.
  38. Value *A; const APInt *B;
  39. if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
  40. // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
  41. // demand the sign bit (and many others) here??
  42. Value *Rem = Builder->CreateAnd(A, ConstantInt::get(I.getType(), *B-1),
  43. Op1->getName());
  44. I.setOperand(1, Rem);
  45. return &I;
  46. }
  47. return nullptr;
  48. }
  49. /// CanEvaluateShifted - See if we can compute the specified value, but shifted
  50. /// logically to the left or right by some number of bits. This should return
  51. /// true if the expression can be computed for the same cost as the current
  52. /// expression tree. This is used to eliminate extraneous shifting from things
  53. /// like:
  54. /// %C = shl i128 %A, 64
  55. /// %D = shl i128 %B, 96
  56. /// %E = or i128 %C, %D
  57. /// %F = lshr i128 %E, 64
  58. /// where the client will ask if E can be computed shifted right by 64-bits. If
  59. /// this succeeds, the GetShiftedValue function will be called to produce the
  60. /// value.
  61. static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
  62. InstCombiner &IC, Instruction *CxtI) {
  63. // We can always evaluate constants shifted.
  64. if (isa<Constant>(V))
  65. return true;
  66. Instruction *I = dyn_cast<Instruction>(V);
  67. if (!I) return false;
  68. // If this is the opposite shift, we can directly reuse the input of the shift
  69. // if the needed bits are already zero in the input. This allows us to reuse
  70. // the value which means that we don't care if the shift has multiple uses.
  71. // TODO: Handle opposite shift by exact value.
  72. ConstantInt *CI = nullptr;
  73. if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
  74. (!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
  75. if (CI->getZExtValue() == NumBits) {
  76. // TODO: Check that the input bits are already zero with MaskedValueIsZero
  77. #if 0
  78. // If this is a truncate of a logical shr, we can truncate it to a smaller
  79. // lshr iff we know that the bits we would otherwise be shifting in are
  80. // already zeros.
  81. uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
  82. uint32_t BitWidth = Ty->getScalarSizeInBits();
  83. if (MaskedValueIsZero(I->getOperand(0),
  84. APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
  85. CI->getLimitedValue(BitWidth) < BitWidth) {
  86. return CanEvaluateTruncated(I->getOperand(0), Ty);
  87. }
  88. #endif
  89. }
  90. }
  91. // We can't mutate something that has multiple uses: doing so would
  92. // require duplicating the instruction in general, which isn't profitable.
  93. if (!I->hasOneUse()) return false;
  94. switch (I->getOpcode()) {
  95. default: return false;
  96. case Instruction::And:
  97. case Instruction::Or:
  98. case Instruction::Xor:
  99. // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
  100. return CanEvaluateShifted(I->getOperand(0), NumBits, isLeftShift, IC, I) &&
  101. CanEvaluateShifted(I->getOperand(1), NumBits, isLeftShift, IC, I);
  102. case Instruction::Shl: {
  103. // We can often fold the shift into shifts-by-a-constant.
  104. CI = dyn_cast<ConstantInt>(I->getOperand(1));
  105. if (!CI) return false;
  106. // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
  107. if (isLeftShift) return true;
  108. // We can always turn shl(c)+shr(c) -> and(c2).
  109. if (CI->getValue() == NumBits) return true;
  110. unsigned TypeWidth = I->getType()->getScalarSizeInBits();
  111. // We can turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but it isn't
  112. // profitable unless we know the and'd out bits are already zero.
  113. if (CI->getZExtValue() > NumBits) {
  114. unsigned LowBits = TypeWidth - CI->getZExtValue();
  115. if (IC.MaskedValueIsZero(I->getOperand(0),
  116. APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits,
  117. 0, CxtI))
  118. return true;
  119. }
  120. return false;
  121. }
  122. case Instruction::LShr: {
  123. // We can often fold the shift into shifts-by-a-constant.
  124. CI = dyn_cast<ConstantInt>(I->getOperand(1));
  125. if (!CI) return false;
  126. // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
  127. if (!isLeftShift) return true;
  128. // We can always turn lshr(c)+shl(c) -> and(c2).
  129. if (CI->getValue() == NumBits) return true;
  130. unsigned TypeWidth = I->getType()->getScalarSizeInBits();
  131. // We can always turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but it isn't
  132. // profitable unless we know the and'd out bits are already zero.
  133. if (CI->getValue().ult(TypeWidth) && CI->getZExtValue() > NumBits) {
  134. unsigned LowBits = CI->getZExtValue() - NumBits;
  135. if (IC.MaskedValueIsZero(I->getOperand(0),
  136. APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits,
  137. 0, CxtI))
  138. return true;
  139. }
  140. return false;
  141. }
  142. case Instruction::Select: {
  143. SelectInst *SI = cast<SelectInst>(I);
  144. return CanEvaluateShifted(SI->getTrueValue(), NumBits, isLeftShift,
  145. IC, SI) &&
  146. CanEvaluateShifted(SI->getFalseValue(), NumBits, isLeftShift, IC, SI);
  147. }
  148. case Instruction::PHI: {
  149. // We can change a phi if we can change all operands. Note that we never
  150. // get into trouble with cyclic PHIs here because we only consider
  151. // instructions with a single use.
  152. PHINode *PN = cast<PHINode>(I);
  153. for (Value *IncValue : PN->incoming_values())
  154. if (!CanEvaluateShifted(IncValue, NumBits, isLeftShift,
  155. IC, PN))
  156. return false;
  157. return true;
  158. }
  159. }
  160. }
/// GetShiftedValue - When CanEvaluateShifted returned true for an expression,
/// this value inserts the new computation that produces the shifted value.
///
/// Mutates the expression tree rooted at V *in place* (this is safe because
/// CanEvaluateShifted only accepted single-use interior instructions), and
/// returns the value that now computes "V shifted by NumBits".  Each visited
/// instruction is re-added to the InstCombine worklist.
static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
                              InstCombiner &IC, const DataLayout &DL) {
  // We can always evaluate constants shifted.
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (isLeftShift)
      V = IC.Builder->CreateShl(C, NumBits);
    else
      V = IC.Builder->CreateLShr(C, NumBits);
    // If we got a constantexpr back, try to simplify it with TD info.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      V = ConstantFoldConstantExpression(CE, DL, IC.getTargetLibraryInfo());
    return V;
  }

  Instruction *I = cast<Instruction>(V);
  // Revisit the mutated instruction so follow-on combines can fire.
  IC.Worklist.Add(I);

  switch (I->getOpcode()) {
  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
    I->setOperand(
        0, GetShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
    I->setOperand(
        1, GetShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
    return I;

  case Instruction::Shl: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    unsigned TypeWidth = BO->getType()->getScalarSizeInBits();

    // We only accept shifts-by-a-constant in CanEvaluateShifted.
    ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));

    // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
    if (isLeftShift) {
      // If this is oversized composite shift, then unsigned shifts get 0.
      unsigned NewShAmt = NumBits+CI->getZExtValue();
      if (NewShAmt >= TypeWidth)
        return Constant::getNullValue(I->getType());

      BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
      // The combined shift may overflow even if neither original one did, so
      // the wrap flags must be dropped.
      BO->setHasNoUnsignedWrap(false);
      BO->setHasNoSignedWrap(false);
      return I;
    }

    // We turn shl(c)+lshr(c) -> and(c2) if the input doesn't already have
    // zeros.
    if (CI->getValue() == NumBits) {
      APInt Mask(APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits));
      V = IC.Builder->CreateAnd(BO->getOperand(0),
                                ConstantInt::get(BO->getContext(), Mask));
      if (Instruction *VI = dyn_cast<Instruction>(V)) {
        // Put the replacement where the old shift was and steal its name.
        VI->moveBefore(BO);
        VI->takeName(BO);
      }
      return V;
    }

    // We turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but only when we know that
    // the and won't be needed (CanEvaluateShifted proved the masked-out bits
    // are already zero).
    assert(CI->getZExtValue() > NumBits);
    BO->setOperand(1, ConstantInt::get(BO->getType(),
                                       CI->getZExtValue() - NumBits));
    BO->setHasNoUnsignedWrap(false);
    BO->setHasNoSignedWrap(false);
    return BO;
  }
  case Instruction::LShr: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    unsigned TypeWidth = BO->getType()->getScalarSizeInBits();

    // We only accept shifts-by-a-constant in CanEvaluateShifted.
    ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));

    // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
    if (!isLeftShift) {
      // If this is oversized composite shift, then unsigned shifts get 0.
      unsigned NewShAmt = NumBits+CI->getZExtValue();
      if (NewShAmt >= TypeWidth)
        return Constant::getNullValue(BO->getType());

      BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
      // The combined shift may shift out set bits, so 'exact' no longer holds.
      BO->setIsExact(false);
      return I;
    }

    // We turn lshr(c)+shl(c) -> and(c2) if the input doesn't already have
    // zeros.
    if (CI->getValue() == NumBits) {
      APInt Mask(APInt::getHighBitsSet(TypeWidth, TypeWidth - NumBits));
      V = IC.Builder->CreateAnd(I->getOperand(0),
                                ConstantInt::get(BO->getContext(), Mask));
      if (Instruction *VI = dyn_cast<Instruction>(V)) {
        VI->moveBefore(I);
        VI->takeName(I);
      }
      return V;
    }

    // We turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but only when we know that
    // the and won't be needed (CanEvaluateShifted proved the masked-out bits
    // are already zero).
    assert(CI->getZExtValue() > NumBits);
    BO->setOperand(1, ConstantInt::get(BO->getType(),
                                       CI->getZExtValue() - NumBits));
    BO->setIsExact(false);
    return BO;
  }

  case Instruction::Select:
    // Shift both arms of the select; operand 0 is the condition.
    I->setOperand(
        1, GetShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
    I->setOperand(
        2, GetShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
    return I;
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      PN->setIncomingValue(i, GetShiftedValue(PN->getIncomingValue(i), NumBits,
                                              isLeftShift, IC, DL));
    return PN;
  }
  }
}
/// FoldShiftByConstant - Fold a shift of Op0 by the constant amount Op1
/// (a scalar ConstantInt or a splat constant vector).  Returns a replacement
/// instruction, or null if no transform applied.
Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
                                               BinaryOperator &I) {
  bool isLeftShift = I.getOpcode() == Instruction::Shl;

  // Extract a scalar shift amount; for vectors this only handles splats.
  ConstantInt *COp1 = nullptr;
  if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(Op1))
    COp1 = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
  else if (ConstantVector *CV = dyn_cast<ConstantVector>(Op1))
    COp1 = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
  else
    COp1 = dyn_cast<ConstantInt>(Op1);

  if (!COp1)
    return nullptr;

  // See if we can propagate this shift into the input, this covers the trivial
  // cast of lshr(shl(x,c1),c2) as well as other more complex cases.
  if (I.getOpcode() != Instruction::AShr &&
      CanEvaluateShifted(Op0, COp1->getZExtValue(), isLeftShift, *this, &I)) {
    DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"
              " to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n");

    return ReplaceInstUsesWith(
        I, GetShiftedValue(Op0, COp1->getZExtValue(), isLeftShift, *this, DL));
  }

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();

  assert(!COp1->uge(TypeBits) &&
         "Shift over the type width should have been removed already");

  // ((X*C1) << C2) == (X * (C1 << C2))
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
    if (BO->getOpcode() == Instruction::Mul && isLeftShift)
      if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
        return BinaryOperator::CreateMul(BO->getOperand(0),
                                         ConstantExpr::getShl(BOOp, Op1));

  // Try to fold constant and into select arguments.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = FoldOpIntoSelect(I, SI))
      return R;
  if (isa<PHINode>(Op0))
    if (Instruction *NV = FoldOpIntoPhi(I))
      return NV;

  // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
  if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
    Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
    // If 'shift2' is an ashr, we would have to get the sign bit into a funny
    // place.  Don't try to do this transformation in this case.  Also, we
    // require that the input operand is a shift-by-constant so that we have
    // confidence that the shifts will get folded together.  We could do this
    // xform in more cases, but it is unlikely to be profitable.
    if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
        isa<ConstantInt>(TrOp->getOperand(1))) {
      // Okay, we'll do this xform.  Make the shift of shift.
      Constant *ShAmt = ConstantExpr::getZExt(COp1, TrOp->getType());
      // (shift2 (shift1 & 0x00FF), c2)
      Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());

      // For logical shifts, the truncation has the effect of making the high
      // part of the register be zeros.  Emulate this by inserting an AND to
      // clear the top bits as needed.  This 'and' will usually be zapped by
      // other xforms later if dead.
      unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
      unsigned DstSize = TI->getType()->getScalarSizeInBits();
      APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));

      // The mask we constructed says what the trunc would do if occurring
      // between the shifts.  We want to know the effect *after* the second
      // shift.  We know that it is a logical shift by a constant, so adjust the
      // mask as appropriate.
      if (I.getOpcode() == Instruction::Shl)
        MaskV <<= COp1->getZExtValue();
      else {
        assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
        MaskV = MaskV.lshr(COp1->getZExtValue());
      }

      // shift1 & 0x00FF
      Value *And = Builder->CreateAnd(NSh,
                                      ConstantInt::get(I.getContext(), MaskV),
                                      TI->getName());

      // Return the value truncated to the interesting size.
      return new TruncInst(And, I.getType());
    }
  }

  if (Op0->hasOneUse()) {
    if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
      // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
      Value *V1, *V2;
      ConstantInt *CC;
      switch (Op0BO->getOpcode()) {
      default: break;
      case Instruction::Add:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor: {
        // These operators commute.
        // Turn (Y + (X >> C)) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
            match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
                  m_Specific(Op1)))) {
          Value *YS =         // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
          // (X + (Y << C))
          Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
                                          Op0BO->getOperand(1)->getName());
          uint32_t Op1Val = COp1->getLimitedValue(TypeBits);

          // Mask off the low bits that the >>/<< round-trip would have zeroed.
          APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
          Constant *Mask = ConstantInt::get(I.getContext(), Bits);
          if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
            Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
          return BinaryOperator::CreateAnd(X, Mask);
        }

        // Turn (Y + ((X >> C) & CC)) << C  ->  ((X & (CC << C)) + (Y << C))
        Value *Op0BOOp1 = Op0BO->getOperand(1);
        if (isLeftShift && Op0BOOp1->hasOneUse() &&
            match(Op0BOOp1,
                  m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
                        m_ConstantInt(CC)))) {
          Value *YS =   // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(0), Op1,
                               Op0BO->getName());
          // X & (CC << C)
          Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
                                         V1->getName()+".mask");
          return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
        }
      }

      // FALL THROUGH.
      case Instruction::Sub: {
        // Sub is not commutative, so only the LHS-operand forms are matched
        // here (Add etc. reach this case too via the fall-through above).
        // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
                  m_Specific(Op1)))) {
          Value *YS =  // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
          // (X + (Y << C))
          Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
                                          Op0BO->getOperand(0)->getName());
          uint32_t Op1Val = COp1->getLimitedValue(TypeBits);

          APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
          Constant *Mask = ConstantInt::get(I.getContext(), Bits);
          if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
            Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
          return BinaryOperator::CreateAnd(X, Mask);
        }

        // Turn (((X >> C)&CC) + Y) << C  ->  (X + (Y << C)) & (CC << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0),
                  m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))),
                        m_ConstantInt(CC))) && V2 == Op1) {
          Value *YS = // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
          // X & (CC << C)
          Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
                                         V1->getName()+".mask");

          return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
        }

        break;
      }
      }

      // If the operand is a bitwise operator with a constant RHS, and the
      // shift is the only use, we can pull it out of the shift.
      if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
        bool isValid = true;     // Valid only for And, Or, Xor
        bool highBitSet = false; // Transform if high bit of constant set?

        switch (Op0BO->getOpcode()) {
        default: isValid = false; break;   // Do not perform transform!
        case Instruction::Add:
          // Add distributes over shl, but not over the right shifts.
          isValid = isLeftShift;
          break;
        case Instruction::Or:
        case Instruction::Xor:
          highBitSet = false;
          break;
        case Instruction::And:
          highBitSet = true;
          break;
        }

        // If this is a signed shift right, and the high bit is modified
        // by the logical operation, do not perform the transformation.
        // The highBitSet boolean indicates the value of the high bit of
        // the constant which would cause it to be modified for this
        // operation.
        //
        if (isValid && I.getOpcode() == Instruction::AShr)
          isValid = Op0C->getValue()[TypeBits-1] == highBitSet;

        if (isValid) {
          // New RHS: the old constant shifted by the same amount.
          Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);

          Value *NewShift =
            Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
          NewShift->takeName(Op0BO);

          return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
                                        NewRHS);
        }
      }
    }
  }

  // Find out if this is a shift of a shift by a constant.
  BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
  if (ShiftOp && !ShiftOp->isShift())
    ShiftOp = nullptr;

  if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {

    // This is a constant shift of a constant shift. Be careful about hiding
    // shl instructions behind bit masks. They are used to represent multiplies
    // by a constant, and it is important that simple arithmetic expressions
    // are still recognizable by scalar evolution.
    //
    // The transforms applied to shl are very similar to the transforms applied
    // to mul by constant. We can be more aggressive about optimizing right
    // shifts.
    //
    // Combinations of right and left shifts will still be optimized in
    // DAGCombine where scalar evolution no longer applies.

    ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
    uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
    uint32_t ShiftAmt2 = COp1->getLimitedValue(TypeBits);
    assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
    if (ShiftAmt1 == 0) return nullptr;  // Will be simplified in the future.
    Value *X = ShiftOp->getOperand(0);

    IntegerType *Ty = cast<IntegerType>(I.getType());

    // Check for (X << c1) << c2  and  (X >> c1) >> c2
    if (I.getOpcode() == ShiftOp->getOpcode()) {
      uint32_t AmtSum = ShiftAmt1+ShiftAmt2;   // Fold into one big shift.
      // If this is oversized composite shift, then unsigned shifts get 0, ashr
      // saturates.
      if (AmtSum >= TypeBits) {
        if (I.getOpcode() != Instruction::AShr)
          return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
        AmtSum = TypeBits-1;  // Saturate to 31 for i32 ashr.
      }

      return BinaryOperator::Create(I.getOpcode(), X,
                                    ConstantInt::get(Ty, AmtSum));
    }

    if (ShiftAmt1 == ShiftAmt2) {
      // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
      if (I.getOpcode() == Instruction::LShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
        return BinaryOperator::CreateAnd(X,
                                        ConstantInt::get(I.getContext(), Mask));
      }
    } else if (ShiftAmt1 < ShiftAmt2) {
      uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;

      // (X >>?,exact C1) << C2 --> X << (C2-C1)
      // The inexact version is deferred to DAGCombine so we don't hide shl
      // behind a bit mask.
      if (I.getOpcode() == Instruction::Shl &&
          ShiftOp->getOpcode() != Instruction::Shl &&
          ShiftOp->isExact()) {
        assert(ShiftOp->getOpcode() == Instruction::LShr ||
               ShiftOp->getOpcode() == Instruction::AShr);
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
                                                        X, ShiftDiffCst);
        NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
        NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
        return NewShl;
      }

      // (X << C1) >>u C2  --> X >>u (C2-C1) & (-1 >> C2)
      if (I.getOpcode() == Instruction::LShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        // (X <<nuw C1) >>u C2 --> X >>u (C2-C1)
        if (ShiftOp->hasNoUnsignedWrap()) {
          BinaryOperator *NewLShr = BinaryOperator::Create(Instruction::LShr,
                                                           X, ShiftDiffCst);
          NewLShr->setIsExact(I.isExact());
          return NewLShr;
        }
        Value *Shift = Builder->CreateLShr(X, ShiftDiffCst);

        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::CreateAnd(Shift,
                                         ConstantInt::get(I.getContext(),Mask));
      }

      // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
      // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
      if (I.getOpcode() == Instruction::AShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        if (ShiftOp->hasNoSignedWrap()) {
          // (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
          ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
          BinaryOperator *NewAShr = BinaryOperator::Create(Instruction::AShr,
                                                           X, ShiftDiffCst);
          NewAShr->setIsExact(I.isExact());
          return NewAShr;
        }
      }
    } else {
      assert(ShiftAmt2 < ShiftAmt1);
      uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;

      // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
      // The inexact version is deferred to DAGCombine so we don't hide shl
      // behind a bit mask.
      if (I.getOpcode() == Instruction::Shl &&
          ShiftOp->getOpcode() != Instruction::Shl &&
          ShiftOp->isExact()) {
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        BinaryOperator *NewShr = BinaryOperator::Create(ShiftOp->getOpcode(),
                                                        X, ShiftDiffCst);
        NewShr->setIsExact(true);
        return NewShr;
      }

      // (X << C1) >>u C2  --> X << (C1-C2) & (-1 >> C2)
      if (I.getOpcode() == Instruction::LShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        if (ShiftOp->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C2 --> X <<nuw (C1-C2)
          BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
                                                          X, ShiftDiffCst);
          NewShl->setHasNoUnsignedWrap(true);
          return NewShl;
        }
        Value *Shift = Builder->CreateShl(X, ShiftDiffCst);

        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::CreateAnd(Shift,
                                         ConstantInt::get(I.getContext(),Mask));
      }

      // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
      // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
      if (I.getOpcode() == Instruction::AShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        if (ShiftOp->hasNoSignedWrap()) {
          // (X <<nsw C1) >>s C2 --> X <<nsw (C1-C2)
          ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
          BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
                                                          X, ShiftDiffCst);
          NewShl->setHasNoSignedWrap(true);
          return NewShl;
        }
      }
    }
  }
  return nullptr;
}
  608. Instruction *InstCombiner::visitShl(BinaryOperator &I) {
  609. if (Value *V = SimplifyVectorOp(I))
  610. return ReplaceInstUsesWith(I, V);
  611. if (Value *V =
  612. SimplifyShlInst(I.getOperand(0), I.getOperand(1), I.hasNoSignedWrap(),
  613. I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
  614. return ReplaceInstUsesWith(I, V);
  615. if (Instruction *V = commonShiftTransforms(I))
  616. return V;
  617. if (ConstantInt *Op1C = dyn_cast<ConstantInt>(I.getOperand(1))) {
  618. unsigned ShAmt = Op1C->getZExtValue();
  619. // If the shifted-out value is known-zero, then this is a NUW shift.
  620. if (!I.hasNoUnsignedWrap() &&
  621. MaskedValueIsZero(I.getOperand(0),
  622. APInt::getHighBitsSet(Op1C->getBitWidth(), ShAmt),
  623. 0, &I)) {
  624. I.setHasNoUnsignedWrap();
  625. return &I;
  626. }
  627. // If the shifted out value is all signbits, this is a NSW shift.
  628. if (!I.hasNoSignedWrap() &&
  629. ComputeNumSignBits(I.getOperand(0), 0, &I) > ShAmt) {
  630. I.setHasNoSignedWrap();
  631. return &I;
  632. }
  633. }
  634. // (C1 << A) << C2 -> (C1 << C2) << A
  635. Constant *C1, *C2;
  636. Value *A;
  637. if (match(I.getOperand(0), m_OneUse(m_Shl(m_Constant(C1), m_Value(A)))) &&
  638. match(I.getOperand(1), m_Constant(C2)))
  639. return BinaryOperator::CreateShl(ConstantExpr::getShl(C1, C2), A);
  640. return nullptr;
  641. }
  642. Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
  643. if (Value *V = SimplifyVectorOp(I))
  644. return ReplaceInstUsesWith(I, V);
  645. if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
  646. DL, TLI, DT, AC))
  647. return ReplaceInstUsesWith(I, V);
  648. if (Instruction *R = commonShiftTransforms(I))
  649. return R;
  650. Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  651. if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
  652. unsigned ShAmt = Op1C->getZExtValue();
  653. if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
  654. unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
  655. // ctlz.i32(x)>>5 --> zext(x == 0)
  656. // cttz.i32(x)>>5 --> zext(x == 0)
  657. // ctpop.i32(x)>>5 --> zext(x == -1)
  658. if ((II->getIntrinsicID() == Intrinsic::ctlz ||
  659. II->getIntrinsicID() == Intrinsic::cttz ||
  660. II->getIntrinsicID() == Intrinsic::ctpop) &&
  661. isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt) {
  662. bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
  663. Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
  664. Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS);
  665. return new ZExtInst(Cmp, II->getType());
  666. }
  667. }
  668. // If the shifted-out value is known-zero, then this is an exact shift.
  669. if (!I.isExact() &&
  670. MaskedValueIsZero(Op0, APInt::getLowBitsSet(Op1C->getBitWidth(), ShAmt),
  671. 0, &I)){
  672. I.setIsExact();
  673. return &I;
  674. }
  675. }
  676. return nullptr;
  677. }
  678. Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
  679. if (Value *V = SimplifyVectorOp(I))
  680. return ReplaceInstUsesWith(I, V);
  681. if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
  682. DL, TLI, DT, AC))
  683. return ReplaceInstUsesWith(I, V);
  684. if (Instruction *R = commonShiftTransforms(I))
  685. return R;
  686. Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  687. if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
  688. unsigned ShAmt = Op1C->getZExtValue();
  689. // If the input is a SHL by the same constant (ashr (shl X, C), C), then we
  690. // have a sign-extend idiom.
  691. Value *X;
  692. if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
  693. // If the input is an extension from the shifted amount value, e.g.
  694. // %x = zext i8 %A to i32
  695. // %y = shl i32 %x, 24
  696. // %z = ashr %y, 24
  697. // then turn this into "z = sext i8 A to i32".
  698. if (ZExtInst *ZI = dyn_cast<ZExtInst>(X)) {
  699. uint32_t SrcBits = ZI->getOperand(0)->getType()->getScalarSizeInBits();
  700. uint32_t DestBits = ZI->getType()->getScalarSizeInBits();
  701. if (Op1C->getZExtValue() == DestBits-SrcBits)
  702. return new SExtInst(ZI->getOperand(0), ZI->getType());
  703. }
  704. }
  705. // If the shifted-out value is known-zero, then this is an exact shift.
  706. if (!I.isExact() &&
  707. MaskedValueIsZero(Op0,APInt::getLowBitsSet(Op1C->getBitWidth(),ShAmt),
  708. 0, &I)){
  709. I.setIsExact();
  710. return &I;
  711. }
  712. }
  713. // See if we can turn a signed shr into an unsigned shr.
  714. if (MaskedValueIsZero(Op0,
  715. APInt::getSignBit(I.getType()->getScalarSizeInBits()),
  716. 0, &I))
  717. return BinaryOperator::CreateLShr(Op0, Op1);
  718. return nullptr;
  719. }