InstCombineAndOrXor.cpp 114 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845
  1. //===- InstCombineAndOrXor.cpp --------------------------------------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file implements the visitAnd, visitOr, and visitXor functions.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "InstCombineInternal.h"
  14. #include "llvm/Analysis/InstructionSimplify.h"
  15. #include "llvm/IR/ConstantRange.h"
  16. #include "llvm/IR/Intrinsics.h"
  17. #include "llvm/IR/PatternMatch.h"
  18. #include "llvm/Transforms/Utils/CmpInstAnalysis.h"
  19. using namespace llvm;
  20. using namespace PatternMatch;
  21. #define DEBUG_TYPE "instcombine"
  22. static inline Value *dyn_castNotVal(Value *V) {
  23. // If this is not(not(x)) don't return that this is a not: we want the two
  24. // not's to be folded first.
  25. if (BinaryOperator::isNot(V)) {
  26. Value *Operand = BinaryOperator::getNotArgument(V);
  27. if (!IsFreeToInvert(Operand, Operand->hasOneUse()))
  28. return Operand;
  29. }
  30. // Constants can be considered to be not'ed values...
  31. if (ConstantInt *C = dyn_cast<ConstantInt>(V))
  32. return ConstantInt::get(C->getType(), ~C->getValue());
  33. return nullptr;
  34. }
  35. /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
  36. /// predicate into a three bit mask. It also returns whether it is an ordered
  37. /// predicate by reference.
  38. static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
  39. isOrdered = false;
  40. switch (CC) {
  41. case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
  42. case FCmpInst::FCMP_UNO: return 0; // 000
  43. case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
  44. case FCmpInst::FCMP_UGT: return 1; // 001
  45. case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
  46. case FCmpInst::FCMP_UEQ: return 2; // 010
  47. case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
  48. case FCmpInst::FCMP_UGE: return 3; // 011
  49. case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
  50. case FCmpInst::FCMP_ULT: return 4; // 100
  51. case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
  52. case FCmpInst::FCMP_UNE: return 5; // 101
  53. case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
  54. case FCmpInst::FCMP_ULE: return 6; // 110
  55. // True -> 7
  56. default:
  57. // Not expecting FCMP_FALSE and FCMP_TRUE;
  58. llvm_unreachable("Unexpected FCmp predicate!");
  59. }
  60. }
  61. /// getNewICmpValue - This is the complement of getICmpCode, which turns an
  62. /// opcode and two operands into either a constant true or false, or a brand
  63. /// new ICmp instruction. The sign is passed in to determine which kind
  64. /// of predicate to use in the new icmp instruction.
  65. static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
  66. InstCombiner::BuilderTy *Builder) {
  67. ICmpInst::Predicate NewPred;
  68. if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
  69. return NewConstant;
  70. return Builder->CreateICmp(NewPred, LHS, RHS);
  71. }
  72. /// getFCmpValue - This is the complement of getFCmpCode, which turns an
  73. /// opcode and two operands into either a FCmp instruction. isordered is passed
  74. /// in to determine which kind of predicate to use in the new fcmp instruction.
  75. static Value *getFCmpValue(bool isordered, unsigned code,
  76. Value *LHS, Value *RHS,
  77. InstCombiner::BuilderTy *Builder) {
  78. CmpInst::Predicate Pred;
  79. switch (code) {
  80. default: llvm_unreachable("Illegal FCmp code!");
  81. case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
  82. case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
  83. case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
  84. case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
  85. case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
  86. case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
  87. case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
  88. case 7:
  89. if (!isordered)
  90. return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
  91. Pred = FCmpInst::FCMP_ORD; break;
  92. }
  93. return Builder->CreateFCmp(Pred, LHS, RHS);
  94. }
  95. /// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) to BSWAP(BITWISE_OP(A, B))
  96. /// \param I Binary operator to transform.
  97. /// \return Pointer to node that must replace the original binary operator, or
  98. /// null pointer if no transformation was made.
  99. Value *InstCombiner::SimplifyBSwap(BinaryOperator &I) {
  100. IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
  101. // Can't do vectors.
  102. if (I.getType()->isVectorTy()) return nullptr;
  103. // Can only do bitwise ops.
  104. unsigned Op = I.getOpcode();
  105. if (Op != Instruction::And && Op != Instruction::Or &&
  106. Op != Instruction::Xor)
  107. return nullptr;
  108. Value *OldLHS = I.getOperand(0);
  109. Value *OldRHS = I.getOperand(1);
  110. ConstantInt *ConstLHS = dyn_cast<ConstantInt>(OldLHS);
  111. ConstantInt *ConstRHS = dyn_cast<ConstantInt>(OldRHS);
  112. IntrinsicInst *IntrLHS = dyn_cast<IntrinsicInst>(OldLHS);
  113. IntrinsicInst *IntrRHS = dyn_cast<IntrinsicInst>(OldRHS);
  114. bool IsBswapLHS = (IntrLHS && IntrLHS->getIntrinsicID() == Intrinsic::bswap);
  115. bool IsBswapRHS = (IntrRHS && IntrRHS->getIntrinsicID() == Intrinsic::bswap);
  116. if (!IsBswapLHS && !IsBswapRHS)
  117. return nullptr;
  118. if (!IsBswapLHS && !ConstLHS)
  119. return nullptr;
  120. if (!IsBswapRHS && !ConstRHS)
  121. return nullptr;
  122. /// OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
  123. /// OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
  124. Value *NewLHS = IsBswapLHS ? IntrLHS->getOperand(0) :
  125. Builder->getInt(ConstLHS->getValue().byteSwap());
  126. Value *NewRHS = IsBswapRHS ? IntrRHS->getOperand(0) :
  127. Builder->getInt(ConstRHS->getValue().byteSwap());
  128. Value *BinOp = nullptr;
  129. if (Op == Instruction::And)
  130. BinOp = Builder->CreateAnd(NewLHS, NewRHS);
  131. else if (Op == Instruction::Or)
  132. BinOp = Builder->CreateOr(NewLHS, NewRHS);
  133. else //if (Op == Instruction::Xor)
  134. BinOp = Builder->CreateXor(NewLHS, NewRHS);
  135. Module *M = I.getParent()->getParent()->getParent();
  136. Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
  137. return Builder->CreateCall(F, BinOp);
  138. }
// OptAndOp - This handles expressions of the form ((val OP C1) & C2).  Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.  Op is
// guaranteed to be a binary operator.
//
// Returns a replacement instruction (not yet inserted), returns &TheAnd after
// mutating it in place, or returns null if no simplification applies.
Instruction *InstCombiner::OptAndOp(Instruction *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);
  Constant *Together = nullptr;
  // For non-shift ops, C1 & C2 is meaningful and used by several cases below.
  if (!Op->isShift())
    Together = ConstantExpr::getAnd(AndRHS, OpRHS);
  switch (Op->getOpcode()) {
  case Instruction::Xor:
    if (Op->hasOneUse()) {
      // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
      Value *And = Builder->CreateAnd(X, AndRHS);
      And->takeName(Op);
      return BinaryOperator::CreateXor(And, Together);
    }
    break;
  case Instruction::Or:
    if (Op->hasOneUse()){
      if (Together != OpRHS) {
        // (X | C1) & C2 --> (X | (C1&C2)) & C2
        Value *Or = Builder->CreateOr(X, Together);
        Or->takeName(Op);
        return BinaryOperator::CreateAnd(Or, AndRHS);
      }
      ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
      if (TogetherCI && !TogetherCI->isZero()){
        // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
        // NOTE: This reduces the number of bits set in the & mask, which
        // can expose opportunities for store narrowing.
        Together = ConstantExpr::getXor(AndRHS, Together);
        Value *And = Builder->CreateAnd(X, Together);
        And->takeName(Op);
        return BinaryOperator::CreateOr(And, OpRHS);
      }
    }
    break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single bit bit-field should be turned into an XOR
      // of the bit.  First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = AndRHS->getValue();
      // If there is only one bit set.
      if (AndRHSV.isPowerOf2()) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit.  If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt& AddRHS = OpRHS->getValue();
        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV-1)) == 0) {
          // If not, the only thing that can effect the output of the AND is
          // the bit specified by AndRHSV.  If that bit is set, the effect of
          // the XOR is to toggle the bit.  If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Value *NewAnd = Builder->CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;
  case Instruction::Shl: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
    ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShlMask);
    if (CI->getValue() == ShlMask)
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);   // No need for the and.
    if (CI != AndRHS) {                         // Reducing bits set in and.
      TheAnd.setOperand(1, CI);
      return &TheAnd;
    }
    break;
  }
  case Instruction::LShr: {
    // We know that the AND will not produce any of the bits shifted in, so if
    // the anded constant includes them, clear them now!  This only applies to
    // unsigned shifts, because a signed shr may bring in set bits!
    //
    uint32_t BitWidth = AndRHS->getType()->getBitWidth();
    uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
    APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
    ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShrMask);
    if (CI->getValue() == ShrMask)
      // Masking out bits that the shift already masks.
      return ReplaceInstUsesWith(TheAnd, Op);
    if (CI != AndRHS) {
      TheAnd.setOperand(1, CI);  // Reduce bits set in and cst.
      return &TheAnd;
    }
    break;
  }
  case Instruction::AShr:
    // Signed shr.
    // See if this is shifting in some sign extension, then masking it out
    // with an and.
    if (Op->hasOneUse()) {
      uint32_t BitWidth = AndRHS->getType()->getBitWidth();
      uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
      APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
      Constant *C = Builder->getInt(AndRHS->getValue() & ShrMask);
      if (C == AndRHS) {          // Masking out bits shifted in.
        // (Val ashr C1) & C2 -> (Val lshr C1) & C2
        // Make the argument unsigned.
        Value *ShVal = Op->getOperand(0);
        ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
        return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
      }
    }
    break;
  }
  return nullptr;
}
/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi).  In practice, we emit the more efficient
/// (V-Lo) \<u Hi-Lo.  This method expects that Lo <= Hi. isSigned indicates
/// whether to treat the V, Lo and Hi as signed or not. IB is the location to
/// insert new instructions.
Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                     bool isSigned, bool Inside) {
  // The subtraction trick below is only valid when Lo <= Hi.
  assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
            ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
         "Lo is not <= Hi in range emission code!");
  if (Inside) {
    if (Lo == Hi)  // Trivially false.
      return Builder->getFalse();
    // V >= Min && V < Hi --> V < Hi
    if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
      ICmpInst::Predicate pred = (isSigned ?
                                  ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
      return Builder->CreateICmp(pred, V, Hi);
    }
    // Emit V-Lo <u Hi-Lo
    Constant *NegLo = ConstantExpr::getNeg(Lo);
    Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
    Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
    return Builder->CreateICmpULT(Add, UpperBound);
  }
  if (Lo == Hi)  // Trivially true.
    return Builder->getTrue();
  // V < Min || V >= Hi -> V > Hi-1
  // (Adjust Hi so the exclusive upper bound becomes an inclusive one.)
  Hi = SubOne(cast<ConstantInt>(Hi));
  if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
    ICmpInst::Predicate pred = (isSigned ?
                                ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
    return Builder->CreateICmp(pred, V, Hi);
  }
  // Emit V-Lo >u Hi-1-Lo
  // Note that Hi has already had one subtracted from it, above.
  ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
  Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
  Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
  return Builder->CreateICmpUGT(Add, LowerBound);
}
  307. // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
  308. // any number of 0s on either side. The 1s are allowed to wrap from LSB to
  309. // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
  310. // not, since all 1s are not contiguous.
  311. static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
  312. const APInt& V = Val->getValue();
  313. uint32_t BitWidth = Val->getType()->getBitWidth();
  314. if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
  315. // look for the first zero bit after the run of ones
  316. MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
  317. // look for the first non-zero bit
  318. ME = V.getActiveBits();
  319. return true;
  320. }
  321. /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
  322. /// where isSub determines whether the operator is a sub. If we can fold one of
  323. /// the following xforms:
  324. ///
  325. /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
  326. /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
  327. /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
  328. ///
  329. /// return (A +/- B).
  330. ///
  331. Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
  332. ConstantInt *Mask, bool isSub,
  333. Instruction &I) {
  334. Instruction *LHSI = dyn_cast<Instruction>(LHS);
  335. if (!LHSI || LHSI->getNumOperands() != 2 ||
  336. !isa<ConstantInt>(LHSI->getOperand(1))) return nullptr;
  337. ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
  338. switch (LHSI->getOpcode()) {
  339. default: return nullptr;
  340. case Instruction::And:
  341. if (ConstantExpr::getAnd(N, Mask) == Mask) {
  342. // If the AndRHS is a power of two minus one (0+1+), this is simple.
  343. if ((Mask->getValue().countLeadingZeros() +
  344. Mask->getValue().countPopulation()) ==
  345. Mask->getValue().getBitWidth())
  346. break;
  347. // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
  348. // part, we don't need any explicit masks to take them out of A. If that
  349. // is all N is, ignore it.
  350. uint32_t MB = 0, ME = 0;
  351. if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
  352. uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
  353. APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
  354. if (MaskedValueIsZero(RHS, Mask, 0, &I))
  355. break;
  356. }
  357. }
  358. return nullptr;
  359. case Instruction::Or:
  360. case Instruction::Xor:
  361. // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
  362. if ((Mask->getValue().countLeadingZeros() +
  363. Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
  364. && ConstantExpr::getAnd(N, Mask)->isNullValue())
  365. break;
  366. return nullptr;
  367. }
  368. if (isSub)
  369. return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
  370. return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
  371. }
/// Enum for classifying (icmp eq (A & B), C) and (icmp ne (A & B), C).
/// One of A and B is considered the mask, the other the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum
/// contains only "Mask", then both A and B can be considered masks.
/// If A is the mask, then it was proven, that (A & C) == C. This
/// is trivial if C == A, or C == 0. If both A and C are constants, this
/// proof is also easy.
/// For the following explanations we assume that A is the mask.
/// The part "AllOnes" declares, that the comparison is true only
/// if (A & B) == A, or all bits of A are set in B.
/// Example: (icmp eq (A & 3), 3) -> FoldMskICmp_AMask_AllOnes
/// The part "AllZeroes" declares, that the comparison is true only
/// if (A & B) == 0, or all bits of A are cleared in B.
/// Example: (icmp eq (A & 3), 0) -> FoldMskICmp_Mask_AllZeroes
/// The part "Mixed" declares, that (A & B) == C and C might or might not
/// contain any number of one bits and zero bits.
/// Example: (icmp eq (A & 3), 1) -> FoldMskICmp_AMask_Mixed
/// The Part "Not" means, that in above descriptions "==" should be replaced
/// by "!=".
/// Example: (icmp ne (A & 3), 3) -> FoldMskICmp_AMask_NotAllOnes
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
///
/// NOTE: each "NotXXX" flag is deliberately the "XXX" flag shifted left by
/// one; conjugateICmpMask relies on this pairing to swap them with shifts.
enum MaskedICmpType {
  FoldMskICmp_AMask_AllOnes = 1,
  FoldMskICmp_AMask_NotAllOnes = 2,
  FoldMskICmp_BMask_AllOnes = 4,
  FoldMskICmp_BMask_NotAllOnes = 8,
  FoldMskICmp_Mask_AllZeroes = 16,
  FoldMskICmp_Mask_NotAllZeroes = 32,
  FoldMskICmp_AMask_Mixed = 64,
  FoldMskICmp_AMask_NotMixed = 128,
  FoldMskICmp_BMask_Mixed = 256,
  FoldMskICmp_BMask_NotMixed = 512
};
  407. /// return the set of pattern classes (from MaskedICmpType)
  408. /// that (icmp SCC (A & B), C) satisfies
  409. static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
  410. ICmpInst::Predicate SCC)
  411. {
  412. ConstantInt *ACst = dyn_cast<ConstantInt>(A);
  413. ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  414. ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  415. bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
  416. bool icmp_abit = (ACst && !ACst->isZero() &&
  417. ACst->getValue().isPowerOf2());
  418. bool icmp_bbit = (BCst && !BCst->isZero() &&
  419. BCst->getValue().isPowerOf2());
  420. unsigned result = 0;
  421. if (CCst && CCst->isZero()) {
  422. // if C is zero, then both A and B qualify as mask
  423. result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
  424. FoldMskICmp_Mask_AllZeroes |
  425. FoldMskICmp_AMask_Mixed |
  426. FoldMskICmp_BMask_Mixed)
  427. : (FoldMskICmp_Mask_NotAllZeroes |
  428. FoldMskICmp_Mask_NotAllZeroes |
  429. FoldMskICmp_AMask_NotMixed |
  430. FoldMskICmp_BMask_NotMixed));
  431. if (icmp_abit)
  432. result |= (icmp_eq ? (FoldMskICmp_AMask_NotAllOnes |
  433. FoldMskICmp_AMask_NotMixed)
  434. : (FoldMskICmp_AMask_AllOnes |
  435. FoldMskICmp_AMask_Mixed));
  436. if (icmp_bbit)
  437. result |= (icmp_eq ? (FoldMskICmp_BMask_NotAllOnes |
  438. FoldMskICmp_BMask_NotMixed)
  439. : (FoldMskICmp_BMask_AllOnes |
  440. FoldMskICmp_BMask_Mixed));
  441. return result;
  442. }
  443. if (A == C) {
  444. result |= (icmp_eq ? (FoldMskICmp_AMask_AllOnes |
  445. FoldMskICmp_AMask_Mixed)
  446. : (FoldMskICmp_AMask_NotAllOnes |
  447. FoldMskICmp_AMask_NotMixed));
  448. if (icmp_abit)
  449. result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
  450. FoldMskICmp_AMask_NotMixed)
  451. : (FoldMskICmp_Mask_AllZeroes |
  452. FoldMskICmp_AMask_Mixed));
  453. } else if (ACst && CCst &&
  454. ConstantExpr::getAnd(ACst, CCst) == CCst) {
  455. result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
  456. : FoldMskICmp_AMask_NotMixed);
  457. }
  458. if (B == C) {
  459. result |= (icmp_eq ? (FoldMskICmp_BMask_AllOnes |
  460. FoldMskICmp_BMask_Mixed)
  461. : (FoldMskICmp_BMask_NotAllOnes |
  462. FoldMskICmp_BMask_NotMixed));
  463. if (icmp_bbit)
  464. result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
  465. FoldMskICmp_BMask_NotMixed)
  466. : (FoldMskICmp_Mask_AllZeroes |
  467. FoldMskICmp_BMask_Mixed));
  468. } else if (BCst && CCst &&
  469. ConstantExpr::getAnd(BCst, CCst) == CCst) {
  470. result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
  471. : FoldMskICmp_BMask_NotMixed);
  472. }
  473. return result;
  474. }
  475. /// Convert an analysis of a masked ICmp into its equivalent if all boolean
  476. /// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
  477. /// is adjacent to the corresponding normal flag (recording ==), this just
  478. /// involves swapping those bits over.
  479. static unsigned conjugateICmpMask(unsigned Mask) {
  480. unsigned NewMask;
  481. NewMask = (Mask & (FoldMskICmp_AMask_AllOnes | FoldMskICmp_BMask_AllOnes |
  482. FoldMskICmp_Mask_AllZeroes | FoldMskICmp_AMask_Mixed |
  483. FoldMskICmp_BMask_Mixed))
  484. << 1;
  485. NewMask |=
  486. (Mask & (FoldMskICmp_AMask_NotAllOnes | FoldMskICmp_BMask_NotAllOnes |
  487. FoldMskICmp_Mask_NotAllZeroes | FoldMskICmp_AMask_NotMixed |
  488. FoldMskICmp_BMask_NotMixed))
  489. >> 1;
  490. return NewMask;
  491. }
  492. /// decomposeBitTestICmp - Decompose an icmp into the form ((X & Y) pred Z)
  493. /// if possible. The returned predicate is either == or !=. Returns false if
  494. /// decomposition fails.
  495. static bool decomposeBitTestICmp(const ICmpInst *I, ICmpInst::Predicate &Pred,
  496. Value *&X, Value *&Y, Value *&Z) {
  497. ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1));
  498. if (!C)
  499. return false;
  500. switch (I->getPredicate()) {
  501. default:
  502. return false;
  503. case ICmpInst::ICMP_SLT:
  504. // X < 0 is equivalent to (X & SignBit) != 0.
  505. if (!C->isZero())
  506. return false;
  507. Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth()));
  508. Pred = ICmpInst::ICMP_NE;
  509. break;
  510. case ICmpInst::ICMP_SGT:
  511. // X > -1 is equivalent to (X & SignBit) == 0.
  512. if (!C->isAllOnesValue())
  513. return false;
  514. Y = ConstantInt::get(I->getContext(), APInt::getSignBit(C->getBitWidth()));
  515. Pred = ICmpInst::ICMP_EQ;
  516. break;
  517. case ICmpInst::ICMP_ULT:
  518. // X <u 2^n is equivalent to (X & ~(2^n-1)) == 0.
  519. if (!C->getValue().isPowerOf2())
  520. return false;
  521. Y = ConstantInt::get(I->getContext(), -C->getValue());
  522. Pred = ICmpInst::ICMP_EQ;
  523. break;
  524. case ICmpInst::ICMP_UGT:
  525. // X >u 2^n-1 is equivalent to (X & ~(2^n-1)) != 0.
  526. if (!(C->getValue() + 1).isPowerOf2())
  527. return false;
  528. Y = ConstantInt::get(I->getContext(), ~C->getValue());
  529. Pred = ICmpInst::ICMP_NE;
  530. break;
  531. }
  532. X = I->getOperand(0);
  533. Z = ConstantInt::getNullValue(C->getType());
  534. return true;
  535. }
/// foldLogOpOfMaskedICmpsHelper:
/// handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// return the set of pattern classes (from MaskedICmpType)
/// that both LHS and RHS satisfy
///
/// On success the out-parameters A, B, C, D, E hold the matched components
/// of the canonical form above, and LHSCC/RHSCC hold the (possibly
/// decomposed) equality predicates of each compare. Returns 0 on failure.
static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
                                             Value*& B, Value*& C,
                                             Value*& D, Value*& E,
                                             ICmpInst *LHS, ICmpInst *RHS,
                                             ICmpInst::Predicate &LHSCC,
                                             ICmpInst::Predicate &RHSCC) {
  // Both compares must operate on the same type.
  if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
  // vectors are not (yet?) supported
  if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R**, that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11,*L12,*L21,*L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(LHS, LHSCC, L11, L12, L2)) {
    // Bit test consumed both operands; nothing to match on the other side.
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!L1->getType()->isIntegerTy()) {
      // You can icmp pointers, for example. They really aren't masks.
      L11 = L12 = nullptr;
    } else if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!L2->getType()->isIntegerTy()) {
      // You can icmp pointers, for example. They really aren't masks.
      L21 = L22 = nullptr;
    } else if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      // Model the bare operand as trivially masked with all-ones.
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was a icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(LHSCC))
    return 0;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11,*R12;
  bool ok = false;
  if (decomposeBitTestICmp(RHS, RHSCC, R11, R12, R2)) {
    // The value shared with the LHS becomes A; the other operand is D.
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11; D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12; D = R11;
    } else {
      return 0;
    }
    E = R2; R1 = nullptr; ok = true;
  } else if (R1->getType()->isIntegerTy()) {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11; D = R12; E = R2; ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12; D = R11; E = R2; ok = true;
    }
  }

  // Bail if RHS was a icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(RHSCC))
    return 0;

  // Look for ANDs in on the right side of the RHS icmp.
  if (!ok && R2->getType()->isIntegerTy()) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11; D = R12; E = R1; ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12; D = R11; E = R1; ok = true;
    } else {
      return 0;
    }
  }
  if (!ok)
    return 0;

  // Having fixed A, pick B and C from whichever LHS component matched it.
  // NOTE(review): if A matched no L** (possible when both matches above came
  // from the R2 path), B and C are left unset — verify callers tolerate this.
  if (L11 == A) {
    B = L12; C = L2;
  } else if (L12 == A) {
    B = L11; C = L2;
  } else if (L21 == A) {
    B = L22; C = L1;
  } else if (L22 == A) {
    B = L21; C = L1;
  }

  // Classify each side and keep only the pattern classes both satisfy.
  unsigned left_type = getTypeOfMaskedICmp(A, B, C, LHSCC);
  unsigned right_type = getTypeOfMaskedICmp(A, D, E, RHSCC);
  return left_type & right_type;
}
/// foldLogOpOfMaskedICmps:
/// try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y)
///
/// \param IsAnd true when combining with '&', false for '|'.
/// \returns the replacement value, or nullptr if no fold applies.
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     llvm::InstCombiner::BuilderTy *Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
  // Match both compares against the canonical masked form and get the set of
  // MaskedICmpType classes that both satisfy.
  unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS,
                                               LHSCC, RHSCC);
  if (mask == 0) return nullptr;
  assert(ICmpInst::isEquality(LHSCC) && ICmpInst::isEquality(RHSCC) &&
         "foldLogOpOfMaskedICmpsHelper must return an equality predicate.");

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NEWCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    mask = conjugateICmpMask(mask);
  }

  if (mask & FoldMskICmp_Mask_AllZeroes) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    Value *newOr = Builder->CreateOr(B, D);
    Value *newAnd = Builder->CreateAnd(A, newOr);
    // we can't use C as zero, because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D, having a single bit set
    Value *zero = Constant::getNullValue(A->getType());
    return Builder->CreateICmp(NEWCC, newAnd, zero);
  }
  if (mask & FoldMskICmp_BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    Value *newOr = Builder->CreateOr(B, D);
    Value *newAnd = Builder->CreateAnd(A, newOr);
    return Builder->CreateICmp(NEWCC, newAnd, newOr);
  }
  if (mask & FoldMskICmp_AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    Value *newAnd1 = Builder->CreateAnd(B, D);
    Value *newAnd = Builder->CreateAnd(A, newAnd1);
    return Builder->CreateICmp(NEWCC, newAnd, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly, necessary, just a "handle the
  // easy cases for now" decision.
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  if (!BCst) return nullptr;
  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
  if (!DCst) return nullptr;

  if (mask & (FoldMskICmp_Mask_NotAllZeroes | FoldMskICmp_BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D" is
    // the same as either B or D).
    APInt NewMask = BCst->getValue() & DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }
  if (mask & FoldMskICmp_AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D" is
    // the same as either B or D).
    APInt NewMask = BCst->getValue() | DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }
  if (mask & FoldMskICmp_BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // we can't simply use C and E, because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D, having a single bit set
    ConstantInt *CCst = dyn_cast<ConstantInt>(C);
    if (!CCst) return nullptr;
    ConstantInt *ECst = dyn_cast<ConstantInt>(E);
    if (!ECst) return nullptr;
    // Canonicalize a "!=" compare to its "==" equivalent by XORing the RHS
    // with its (single-bit) mask.
    if (LHSCC != NEWCC)
      CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
    if (RHSCC != NEWCC)
      ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));
    // if there is a conflict we should actually return a false for the
    // whole construct
    if (((BCst->getValue() & DCst->getValue()) &
         (CCst->getValue() ^ ECst->getValue())) != 0)
      return ConstantInt::get(LHS->getType(), !IsAnd);
    Value *newOr1 = Builder->CreateOr(B, D);
    Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
    Value *newAnd = Builder->CreateAnd(A, newOr1);
    return Builder->CreateICmp(NEWCC, newAnd, newOr2);
  }
  return nullptr;
}
  756. /// Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
  757. /// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
  758. /// If \p Inverted is true then the check is for the inverted range, e.g.
  759. /// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
  760. Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
  761. bool Inverted) {
  762. // Check the lower range comparison, e.g. x >= 0
  763. // InstCombine already ensured that if there is a constant it's on the RHS.
  764. ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  765. if (!RangeStart)
  766. return nullptr;
  767. ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
  768. Cmp0->getPredicate());
  769. // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  770. if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
  771. (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
  772. return nullptr;
  773. ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
  774. Cmp1->getPredicate());
  775. Value *Input = Cmp0->getOperand(0);
  776. Value *RangeEnd;
  777. if (Cmp1->getOperand(0) == Input) {
  778. // For the upper range compare we have: icmp x, n
  779. RangeEnd = Cmp1->getOperand(1);
  780. } else if (Cmp1->getOperand(1) == Input) {
  781. // For the upper range compare we have: icmp n, x
  782. RangeEnd = Cmp1->getOperand(0);
  783. Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  784. } else {
  785. return nullptr;
  786. }
  787. // Check the upper range comparison, e.g. x < n
  788. ICmpInst::Predicate NewPred;
  789. switch (Pred1) {
  790. case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
  791. case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
  792. default: return nullptr;
  793. }
  794. // This simplification is only valid if the upper range is not negative.
  795. bool IsNegative, IsNotNegative;
  796. ComputeSignBit(RangeEnd, IsNotNegative, IsNegative, /*Depth=*/0, Cmp1);
  797. if (!IsNotNegative)
  798. return nullptr;
  799. if (Inverted)
  800. NewPred = ICmpInst::getInversePredicate(NewPred);
  801. return Builder->CreateICmp(NewPred, Input, RangeEnd);
  802. }
/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
/// Returns a replacement value, or nullptr when no fold applies.
Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
  ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();

  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (PredicatesFoldable(LHSCC, RHSCC)) {
    // Canonicalize operand order so both compares see (A, B).
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      // AND the predicate codes to get the combined predicate.
      unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
      bool isSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
    }
  }

  // handle (roughly):  (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
    return V;

  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
    return V;

  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
    return V;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
  Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
  ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSCst || !RHSCst) return nullptr;

  if (LHSCst == RHSCst && LHSCC == RHSCC) {
    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
    // where C is a power of 2
    if (LHSCC == ICmpInst::ICMP_ULT &&
        LHSCst->getValue().isPowerOf2()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }

    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
    if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
      Value *NewOr = Builder->CreateOr(Val, Val2);
      return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
    }
  }

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
      LHS->hasOneUse() && RHS->hasOneUse()) {
    Value *V;
    ConstantInt *AndCst, *SmallCst = nullptr, *BigCst = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(Val2, m_Trunc(m_Value(V))) &&
        match(Val, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
      SmallCst = RHSCst;
      BigCst = LHSCst;
    } else if (match(Val, m_Trunc(m_Value(V))) &&
               match(Val2, m_And(m_Specific(V), m_ConstantInt(AndCst)))) {
      SmallCst = LHSCst;
      BigCst = RHSCst;
    }

    if (SmallCst && BigCst) {
      unsigned BigBitSize = BigCst->getType()->getBitWidth();
      unsigned SmallBitSize = SmallCst->getType()->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & AndCst->getValue()) == 0 && (Low & BigCst->getValue()) == 0) {
        Value *NewAnd = Builder->CreateAnd(V, Low | AndCst->getValue());
        APInt N = SmallCst->getValue().zext(BigBitSize) | BigCst->getValue();
        Value *NewVal = ConstantInt::get(AndCst->getType()->getContext(), N);
        return Builder->CreateICmp(LHSCC, NewAnd, NewVal);
      }
    }
  }

  // From here on, we only handle:
  //    (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (Val != Val2) return nullptr;

  // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
      RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
      LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
      RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
    return nullptr;

  // Make a constant range that's the intersection of the two icmp ranges.
  // If the intersection is empty, we know that the result is false.
  ConstantRange LHSRange =
      ConstantRange::makeAllowedICmpRegion(LHSCC, LHSCst->getValue());
  ConstantRange RHSRange =
      ConstantRange::makeAllowedICmpRegion(RHSCC, RHSCst->getValue());

  if (LHSRange.intersectWith(RHSRange).isEmptySet())
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!PredicatesFoldable(LHSCC, RHSCC))
    return nullptr;

  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (CmpInst::isSigned(LHSCC) ||
      (ICmpInst::isEquality(LHSCC) &&
       CmpInst::isSigned(RHSCC)))
    ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  else
    ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSCst, RHSCst);
    std::swap(LHSCC, RHSCC);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together.  Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here. We also know
  // (from the icmp folding check above), that the two constants
  // are not equal and that the larger constant is on the RHS
  assert(LHSCst != RHSCst && "Compares not folded above?");

  switch (LHSCC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_EQ:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:         // (X == 13 & X != 15) -> X == 13
    case ICmpInst::ICMP_ULT:        // (X == 13 & X <  15) -> X == 13
    case ICmpInst::ICMP_SLT:        // (X == 13 & X <  15) -> X == 13
      return LHS;
    }
  case ICmpInst::ICMP_NE:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
        return Builder->CreateICmpULT(Val, LHSCst);
      if (LHSCst->isNullValue())    // (X !=  0 & X u< 14) -> X-1 u< 13
        return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
      break;                        // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
        return Builder->CreateICmpSLT(Val, LHSCst);
      break;                        // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_EQ:         // (X != 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X != 13 & X u> 15) -> X u> 15
    case ICmpInst::ICMP_SGT:        // (X != 13 & X s> 15) -> X s> 15
      return RHS;
    case ICmpInst::ICMP_NE:
      // Special case to get the ordering right when the values wrap around
      // zero.
      if (LHSCst->getValue() == 0 && RHSCst->getValue().isAllOnesValue())
        std::swap(LHSCst, RHSCst);
      if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
        Constant *AddCST = ConstantExpr::getNeg(LHSCst);
        Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
        return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1),
                                      Val->getName()+".cmp");
      }
      break;                        // (X != 13 & X != 15) -> no change
    }
    break;
  case ICmpInst::ICMP_ULT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u< 13 & X == 15) -> false
    case ICmpInst::ICMP_UGT:        // (X u< 13 & X u> 15) -> false
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    case ICmpInst::ICMP_SGT:        // (X u< 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X u< 13 & X != 15) -> X u< 13
    case ICmpInst::ICMP_ULT:        // (X u< 13 & X u< 15) -> X u< 13
      return LHS;
    case ICmpInst::ICMP_SLT:        // (X u< 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SLT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_UGT:        // (X s< 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:         // (X s< 13 & X != 15) -> X < 13
    case ICmpInst::ICMP_SLT:        // (X s< 13 & X s< 15) -> X < 13
      return LHS;
    case ICmpInst::ICMP_ULT:        // (X s< 13 & X u< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X u> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_UGT:        // (X u> 13 & X u> 15) -> X u> 15
      return RHS;
    case ICmpInst::ICMP_SGT:        // (X u> 13 & X s> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
        return Builder->CreateICmp(LHSCC, Val, RHSCst);
      break;                        // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT:        // (X u> 13 & X u< 15) -> (X-14) <u 1
      return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
    case ICmpInst::ICMP_SLT:        // (X u> 13 & X s< 15) -> no change
      break;
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (RHSCC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_EQ:         // (X s> 13 & X == 15) -> X == 15
    case ICmpInst::ICMP_SGT:        // (X s> 13 & X s> 15) -> X s> 15
      return RHS;
    case ICmpInst::ICMP_UGT:        // (X s> 13 & X u> 15) -> no change
      break;
    case ICmpInst::ICMP_NE:
      if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
        return Builder->CreateICmp(LHSCC, Val, RHSCst);
      break;                        // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT:        // (X s> 13 & X s< 15) -> (X-14) s< 1
      return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
    case ICmpInst::ICMP_ULT:        // (X s> 13 & X u< 15) -> no change
      break;
    }
    break;
  }

  return nullptr;
}
/// FoldAndOfFCmps - Optimize (fcmp)&(fcmp).  NOTE: Unlike the rest of
/// instcombine, this returns a Value which should already be inserted into the
/// function.
Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
  if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
      RHS->getPredicate() == FCmpInst::FCMP_ORD) {
    // The ord/ord fold below requires matching operand types.
    if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType())
      return nullptr;

    // (fcmp ord x, c) & (fcmp ord y, c)  -> (fcmp ord x, y)
    if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
      if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
        // If either of the constants are nans, then the whole thing returns
        // false.
        if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
          return Builder->getFalse();
        return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
      }

    // Handle vector zeros.  This occurs because the canonical form of
    // "fcmp ord x,x" is "fcmp ord x, 0".
    if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
        isa<ConstantAggregateZero>(RHS->getOperand(1)))
      return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
    return nullptr;
  }

  Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();

  if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
    // Swap RHS operands to match LHS.
    Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
    std::swap(Op1LHS, Op1RHS);
  }

  if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
    // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
    if (Op0CC == Op1CC)
      return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
    // FALSE & anything -> FALSE.
    if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    // TRUE & X -> X.
    if (Op0CC == FCmpInst::FCMP_TRUE)
      return RHS;
    if (Op1CC == FCmpInst::FCMP_TRUE)
      return LHS;

    bool Op0Ordered;
    bool Op1Ordered;
    // Split each predicate into a 3-bit comparison code plus an ordered flag.
    unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
    unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
    // uno && ord -> false
    if (Op0Pred == 0 && Op1Pred == 0 && Op0Ordered != Op1Ordered)
      return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
    // Canonicalize so that a pure uno/ord compare (Pred == 0) is on the left.
    if (Op1Pred == 0) {
      std::swap(LHS, RHS);
      std::swap(Op0Pred, Op1Pred);
      std::swap(Op0Ordered, Op1Ordered);
    }
    if (Op0Pred == 0) {
      // uno && ueq -> uno && (uno || eq) -> uno
      // ord && olt -> ord && (ord && lt) -> olt
      if (!Op0Ordered && (Op0Ordered == Op1Ordered))
        return LHS;
      if (Op0Ordered && (Op0Ordered == Op1Ordered))
        return RHS;

      // uno && oeq -> uno && (ord && eq) -> false
      if (!Op0Ordered)
        return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
      // ord && ueq -> ord && (uno || eq) -> oeq
      return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
    }
  }

  return nullptr;
}
  1095. Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
  1096. bool Changed = SimplifyAssociativeOrCommutative(I);
  1097. Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  1098. if (Value *V = SimplifyVectorOp(I))
  1099. return ReplaceInstUsesWith(I, V);
  1100. if (Value *V = SimplifyAndInst(Op0, Op1, DL, TLI, DT, AC))
  1101. return ReplaceInstUsesWith(I, V);
  1102. // (A|B)&(A|C) -> A|(B&C) etc
  1103. if (Value *V = SimplifyUsingDistributiveLaws(I))
  1104. return ReplaceInstUsesWith(I, V);
  1105. // See if we can simplify any instructions used by the instruction whose sole
  1106. // purpose is to compute bits we don't care about.
  1107. if (SimplifyDemandedInstructionBits(I))
  1108. return &I;
  1109. if (Value *V = SimplifyBSwap(I))
  1110. return ReplaceInstUsesWith(I, V);
  1111. if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
  1112. const APInt &AndRHSMask = AndRHS->getValue();
  1113. // Optimize a variety of ((val OP C1) & C2) combinations...
  1114. if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
  1115. Value *Op0LHS = Op0I->getOperand(0);
  1116. Value *Op0RHS = Op0I->getOperand(1);
  1117. switch (Op0I->getOpcode()) {
  1118. default: break;
  1119. case Instruction::Xor:
  1120. case Instruction::Or: {
  1121. // If the mask is only needed on one incoming arm, push it up.
  1122. if (!Op0I->hasOneUse()) break;
  1123. APInt NotAndRHS(~AndRHSMask);
  1124. if (MaskedValueIsZero(Op0LHS, NotAndRHS, 0, &I)) {
  1125. // Not masking anything out for the LHS, move to RHS.
  1126. Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
  1127. Op0RHS->getName()+".masked");
  1128. return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
  1129. }
  1130. if (!isa<Constant>(Op0RHS) &&
  1131. MaskedValueIsZero(Op0RHS, NotAndRHS, 0, &I)) {
  1132. // Not masking anything out for the RHS, move to LHS.
  1133. Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
  1134. Op0LHS->getName()+".masked");
  1135. return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
  1136. }
  1137. break;
  1138. }
  1139. case Instruction::Add:
  1140. // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
  1141. // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
  1142. // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
  1143. if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
  1144. return BinaryOperator::CreateAnd(V, AndRHS);
  1145. if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
  1146. return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
  1147. break;
  1148. case Instruction::Sub:
  1149. // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
  1150. // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
  1151. // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
  1152. if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
  1153. return BinaryOperator::CreateAnd(V, AndRHS);
  1154. // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
  1155. // has 1's for all bits that the subtraction with A might affect.
  1156. if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
  1157. uint32_t BitWidth = AndRHSMask.getBitWidth();
  1158. uint32_t Zeros = AndRHSMask.countLeadingZeros();
  1159. APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
  1160. if (MaskedValueIsZero(Op0LHS, Mask, 0, &I)) {
  1161. Value *NewNeg = Builder->CreateNeg(Op0RHS);
  1162. return BinaryOperator::CreateAnd(NewNeg, AndRHS);
  1163. }
  1164. }
  1165. break;
  1166. case Instruction::Shl:
  1167. case Instruction::LShr:
  1168. // (1 << x) & 1 --> zext(x == 0)
  1169. // (1 >> x) & 1 --> zext(x == 0)
  1170. if (AndRHSMask == 1 && Op0LHS == AndRHS) {
  1171. Value *NewICmp =
  1172. Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
  1173. return new ZExtInst(NewICmp, I.getType());
  1174. }
  1175. break;
  1176. }
  1177. if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
  1178. if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
  1179. return Res;
  1180. }
  1181. // If this is an integer truncation, and if the source is an 'and' with
  1182. // immediate, transform it. This frequently occurs for bitfield accesses.
  1183. {
  1184. Value *X = nullptr; ConstantInt *YC = nullptr;
  1185. if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
  1186. // Change: and (trunc (and X, YC) to T), C2
  1187. // into : and (trunc X to T), trunc(YC) & C2
  1188. // This will fold the two constants together, which may allow
  1189. // other simplifications.
  1190. Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
  1191. Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
  1192. C3 = ConstantExpr::getAnd(C3, AndRHS);
  1193. return BinaryOperator::CreateAnd(NewCast, C3);
  1194. }
  1195. }
  1196. // Try to fold constant and into select arguments.
  1197. if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
  1198. if (Instruction *R = FoldOpIntoSelect(I, SI))
  1199. return R;
  1200. if (isa<PHINode>(Op0))
  1201. if (Instruction *NV = FoldOpIntoPhi(I))
  1202. return NV;
  1203. }
  1204. // (~A & ~B) == (~(A | B)) - De Morgan's Law
  1205. if (Value *Op0NotVal = dyn_castNotVal(Op0))
  1206. if (Value *Op1NotVal = dyn_castNotVal(Op1))
  1207. if (Op0->hasOneUse() && Op1->hasOneUse()) {
  1208. Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
  1209. I.getName()+".demorgan");
  1210. return BinaryOperator::CreateNot(Or);
  1211. }
  1212. {
  1213. Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
  1214. // (A|B) & ~(A&B) -> A^B
  1215. if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
  1216. match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
  1217. ((A == C && B == D) || (A == D && B == C)))
  1218. return BinaryOperator::CreateXor(A, B);
  1219. // ~(A&B) & (A|B) -> A^B
  1220. if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
  1221. match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
  1222. ((A == C && B == D) || (A == D && B == C)))
  1223. return BinaryOperator::CreateXor(A, B);
  1224. // A&(A^B) => A & ~B
  1225. {
  1226. Value *tmpOp0 = Op0;
  1227. Value *tmpOp1 = Op1;
  1228. if (Op0->hasOneUse() &&
  1229. match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
  1230. if (A == Op1 || B == Op1 ) {
  1231. tmpOp1 = Op0;
  1232. tmpOp0 = Op1;
  1233. // Simplify below
  1234. }
  1235. }
  1236. if (tmpOp1->hasOneUse() &&
  1237. match(tmpOp1, m_Xor(m_Value(A), m_Value(B)))) {
  1238. if (B == tmpOp0) {
  1239. std::swap(A, B);
  1240. }
  1241. // Notice that the patten (A&(~B)) is actually (A&(-1^B)), so if
  1242. // A is originally -1 (or a vector of -1 and undefs), then we enter
  1243. // an endless loop. By checking that A is non-constant we ensure that
  1244. // we will never get to the loop.
  1245. if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
  1246. return BinaryOperator::CreateAnd(A, Builder->CreateNot(B));
  1247. }
  1248. }
  1249. // (A&((~A)|B)) -> A&B
  1250. if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
  1251. match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
  1252. return BinaryOperator::CreateAnd(A, Op1);
  1253. if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
  1254. match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
  1255. return BinaryOperator::CreateAnd(A, Op0);
  1256. // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
  1257. if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
  1258. if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
  1259. if (Op1->hasOneUse() || cast<BinaryOperator>(Op1)->hasOneUse())
  1260. return BinaryOperator::CreateAnd(Op0, Builder->CreateNot(C));
  1261. // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
  1262. if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
  1263. if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
  1264. if (Op0->hasOneUse() || cast<BinaryOperator>(Op0)->hasOneUse())
  1265. return BinaryOperator::CreateAnd(Op1, Builder->CreateNot(C));
  1266. // (A | B) & ((~A) ^ B) -> (A & B)
  1267. if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
  1268. match(Op1, m_Xor(m_Not(m_Specific(A)), m_Specific(B))))
  1269. return BinaryOperator::CreateAnd(A, B);
  1270. // ((~A) ^ B) & (A | B) -> (A & B)
  1271. if (match(Op0, m_Xor(m_Not(m_Value(A)), m_Value(B))) &&
  1272. match(Op1, m_Or(m_Specific(A), m_Specific(B))))
  1273. return BinaryOperator::CreateAnd(A, B);
  1274. }
  1275. {
  1276. ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
  1277. ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
  1278. if (LHS && RHS)
  1279. if (Value *Res = FoldAndOfICmps(LHS, RHS))
  1280. return ReplaceInstUsesWith(I, Res);
  1281. // TODO: Make this recursive; it's a little tricky because an arbitrary
  1282. // number of 'and' instructions might have to be created.
  1283. Value *X, *Y;
  1284. if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
  1285. if (auto *Cmp = dyn_cast<ICmpInst>(X))
  1286. if (Value *Res = FoldAndOfICmps(LHS, Cmp))
  1287. return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
  1288. if (auto *Cmp = dyn_cast<ICmpInst>(Y))
  1289. if (Value *Res = FoldAndOfICmps(LHS, Cmp))
  1290. return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, X));
  1291. }
  1292. if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
  1293. if (auto *Cmp = dyn_cast<ICmpInst>(X))
  1294. if (Value *Res = FoldAndOfICmps(Cmp, RHS))
  1295. return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
  1296. if (auto *Cmp = dyn_cast<ICmpInst>(Y))
  1297. if (Value *Res = FoldAndOfICmps(Cmp, RHS))
  1298. return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, X));
  1299. }
  1300. }
  1301. // If and'ing two fcmp, try combine them into one.
  1302. if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
  1303. if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
  1304. if (Value *Res = FoldAndOfFCmps(LHS, RHS))
  1305. return ReplaceInstUsesWith(I, Res);
  1306. // fold (and (cast A), (cast B)) -> (cast (and A, B))
  1307. if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
  1308. if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
  1309. Type *SrcTy = Op0C->getOperand(0)->getType();
  1310. if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
  1311. SrcTy == Op1C->getOperand(0)->getType() &&
  1312. SrcTy->isIntOrIntVectorTy()) {
  1313. Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
  1314. // Only do this if the casts both really cause code to be generated.
  1315. if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
  1316. ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
  1317. Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
  1318. return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
  1319. }
  1320. // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
  1321. // cast is otherwise not optimizable. This happens for vector sexts.
  1322. if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
  1323. if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
  1324. if (Value *Res = FoldAndOfICmps(LHS, RHS))
  1325. return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
  1326. // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
  1327. // cast is otherwise not optimizable. This happens for vector sexts.
  1328. if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
  1329. if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
  1330. if (Value *Res = FoldAndOfFCmps(LHS, RHS))
  1331. return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
  1332. }
  1333. }
  1334. {
  1335. Value *X = nullptr;
  1336. bool OpsSwapped = false;
  1337. // Canonicalize SExt or Not to the LHS
  1338. if (match(Op1, m_SExt(m_Value())) ||
  1339. match(Op1, m_Not(m_Value()))) {
  1340. std::swap(Op0, Op1);
  1341. OpsSwapped = true;
  1342. }
  1343. // Fold (and (sext bool to A), B) --> (select bool, B, 0)
  1344. if (match(Op0, m_SExt(m_Value(X))) &&
  1345. X->getType()->getScalarType()->isIntegerTy(1)) {
  1346. Value *Zero = Constant::getNullValue(Op1->getType());
  1347. return SelectInst::Create(X, Op1, Zero);
  1348. }
  1349. // Fold (and ~(sext bool to A), B) --> (select bool, 0, B)
  1350. if (match(Op0, m_Not(m_SExt(m_Value(X)))) &&
  1351. X->getType()->getScalarType()->isIntegerTy(1)) {
  1352. Value *Zero = Constant::getNullValue(Op0->getType());
  1353. return SelectInst::Create(X, Zero, Op1);
  1354. }
  1355. if (OpsSwapped)
  1356. std::swap(Op0, Op1);
  1357. }
  1358. return Changed ? &I : nullptr;
  1359. }
/// CollectBSwapParts - Analyze the specified subexpression and see if it is
/// capable of providing pieces of a bswap.  The subexpression provides pieces
/// of a bswap if it is proven that each of the non-zero bytes in the output of
/// the expression came from the corresponding "byte swapped" byte in some other
/// value.  For example, if the current subexpression is "(shl i32 %X, 24)" then
/// we know that the expression deposits the low byte of %X into the high byte
/// of the bswap result and that all other bytes are zero.  This expression is
/// accepted, the high byte of ByteValues is set to X to indicate a correct
/// match.
///
/// This function returns true if the match was unsuccessful and false if it
/// succeeded.  On entry to the function the "OverallLeftShift" is a signed
/// integer value indicating the number of bytes that the subexpression is
/// later shifted.  For example, if the expression is later right shifted by
/// 16 bits, the OverallLeftShift value would be -2 on entry.  This is used to
/// specify which byte of ByteValues is actually being set.
///
/// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
/// byte is masked to zero by a user.  For example, in (X & 255), X will be
/// processed with a bytemask of 1.  Because bytemask is 32-bits, this limits
/// this function to working on up to 32-byte (256 bit) values.  ByteMask is
/// always in the local (OverallLeftShift) coordinate space.
///
static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
                              SmallVectorImpl<Value *> &ByteValues) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    // Both operands must independently supply their (disjoint) bytes; if
    // either side fails to match, the whole expression fails.
    if (I->getOpcode() == Instruction::Or) {
      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues) ||
             CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical shift by a constant multiple of 8, recurse with
    // OverallLeftShift and ByteMask adjusted.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned ShAmt =
        cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined and of a byte value.
      if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
        return true;

      unsigned ByteShift = ShAmt >> 3;
      if (I->getOpcode() == Instruction::Shl) {
        // X << 2 -> collect(X, +2).  Shifting left moves the expression's
        // bytes toward the high end, so the locally-demanded mask shifts
        // toward the low end.
        OverallLeftShift += ByteShift;
        ByteMask >>= ByteShift;
      } else {
        // X >>u 2 -> collect(X, -2)
        OverallLeftShift -= ByteShift;
        ByteMask <<= ByteShift;
        // Keep the mask confined to the value's byte count (ByteMask is a
        // 32-bit scratch mask; high bits beyond the value width are invalid).
        ByteMask &= (~0U >> (32-ByteValues.size()));
      }

      // If the accumulated shift pushes every byte outside the value, the
      // expression cannot contribute to a bswap.
      if (OverallLeftShift >= (int)ByteValues.size()) return true;
      if (OverallLeftShift <= -(int)ByteValues.size()) return true;

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }

    // If this is a logical 'and' with a mask that clears bytes, clear the
    // corresponding bytes in ByteMask.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
      unsigned NumBytes = ByteValues.size();
      APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
        // If this byte is masked out by a later operation, we don't care what
        // the and mask is.
        if ((ByteMask & (1 << i)) == 0)
          continue;

        // If the AndMask is all zeros for this byte, clear the bit.
        APInt MaskB = AndMask & Byte;
        if (MaskB == 0) {
          ByteMask &= ~(1U << i);
          continue;
        }

        // If the AndMask is not all ones for this byte, it's not a bytezap.
        if (MaskB != Byte)
          return true;

        // Otherwise, this byte is kept.
      }

      return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
                               ByteValues);
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
  // the input value to the bswap.  Some observations: 1) if more than one byte
  // is demanded from this input, then it could not be successfully assembled
  // into a byteswap.  At least one of the two bytes would not be aligned with
  // their ultimate destination.
  if (!isPowerOf2_32(ByteMask)) return true;
  unsigned InputByteNo = countTrailingZeros(ByteMask);

  // 2) The input and ultimate destinations must line up: if byte 3 of an i32
  // is demanded, it needs to go into byte 0 of the result.  This means that the
  // byte needs to be shifted until it lands in the right byte bucket.  The
  // shift amount depends on the position: if the byte is coming from the high
  // part of the value (e.g. byte 3) then it must be shifted right.  If from the
  // low part, it must be shifted left.
  unsigned DestByteNo = InputByteNo + OverallLeftShift;
  if (ByteValues.size()-1-DestByteNo != InputByteNo)
    return true;

  // If the destination byte value is already defined, the values are or'd
  // together, which isn't a bswap (unless it's an or of the same bits).
  if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
    return true;
  ByteValues[DestByteNo] = V;
  return false;
}
  1468. /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
  1469. /// If so, insert the new bswap intrinsic and return it.
  1470. Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
  1471. IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
  1472. if (!ITy || ITy->getBitWidth() % 16 ||
  1473. // ByteMask only allows up to 32-byte values.
  1474. ITy->getBitWidth() > 32*8)
  1475. return nullptr; // Can only bswap pairs of bytes. Can't do vectors.
  1476. /// ByteValues - For each byte of the result, we keep track of which value
  1477. /// defines each byte.
  1478. SmallVector<Value*, 8> ByteValues;
  1479. ByteValues.resize(ITy->getBitWidth()/8);
  1480. // Try to find all the pieces corresponding to the bswap.
  1481. uint32_t ByteMask = ~0U >> (32-ByteValues.size());
  1482. if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
  1483. return nullptr;
  1484. // Check to see if all of the bytes come from the same value.
  1485. Value *V = ByteValues[0];
  1486. if (!V) return nullptr; // Didn't find a byte? Must be zero.
  1487. // Check to make sure that all of the bytes come from the same value.
  1488. for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
  1489. if (ByteValues[i] != V)
  1490. return nullptr;
  1491. Module *M = I.getParent()->getParent()->getParent();
  1492. Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
  1493. return CallInst::Create(F, V);
  1494. }
  1495. /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
  1496. /// If A is (cond?-1:0) and either B or D is ~(cond?-1,0) or (cond?0,-1), then
  1497. /// we can simplify this expression to "cond ? C : D or B".
  1498. static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
  1499. Value *C, Value *D) {
  1500. // If A is not a select of -1/0, this cannot match.
  1501. Value *Cond = nullptr;
  1502. if (!match(A, m_SExt(m_Value(Cond))) ||
  1503. !Cond->getType()->isIntegerTy(1))
  1504. return nullptr;
  1505. // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
  1506. if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
  1507. return SelectInst::Create(Cond, C, B);
  1508. if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
  1509. return SelectInst::Create(Cond, C, B);
  1510. // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
  1511. if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
  1512. return SelectInst::Create(Cond, C, D);
  1513. if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
  1514. return SelectInst::Create(Cond, C, D);
  1515. return nullptr;
  1516. }
  1517. /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
  1518. Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
  1519. Instruction *CxtI) {
  1520. ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
  1521. // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
  1522. // if K1 and K2 are a one-bit mask.
  1523. ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
  1524. ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
  1525. if (LHS->getPredicate() == ICmpInst::ICMP_EQ && LHSCst && LHSCst->isZero() &&
  1526. RHS->getPredicate() == ICmpInst::ICMP_EQ && RHSCst && RHSCst->isZero()) {
  1527. BinaryOperator *LAnd = dyn_cast<BinaryOperator>(LHS->getOperand(0));
  1528. BinaryOperator *RAnd = dyn_cast<BinaryOperator>(RHS->getOperand(0));
  1529. if (LAnd && RAnd && LAnd->hasOneUse() && RHS->hasOneUse() &&
  1530. LAnd->getOpcode() == Instruction::And &&
  1531. RAnd->getOpcode() == Instruction::And) {
  1532. Value *Mask = nullptr;
  1533. Value *Masked = nullptr;
  1534. if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
  1535. isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, AC, CxtI,
  1536. DT) &&
  1537. isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, AC, CxtI,
  1538. DT)) {
  1539. Mask = Builder->CreateOr(LAnd->getOperand(1), RAnd->getOperand(1));
  1540. Masked = Builder->CreateAnd(LAnd->getOperand(0), Mask);
  1541. } else if (LAnd->getOperand(1) == RAnd->getOperand(1) &&
  1542. isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, AC,
  1543. CxtI, DT) &&
  1544. isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, AC,
  1545. CxtI, DT)) {
  1546. Mask = Builder->CreateOr(LAnd->getOperand(0), RAnd->getOperand(0));
  1547. Masked = Builder->CreateAnd(LAnd->getOperand(1), Mask);
  1548. }
  1549. if (Masked)
  1550. return Builder->CreateICmp(ICmpInst::ICMP_NE, Masked, Mask);
  1551. }
  1552. }
  1553. // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
  1554. // --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
  1555. // The original condition actually refers to the following two ranges:
  1556. // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
  1557. // We can fold these two ranges if:
  1558. // 1) C1 and C2 is unsigned greater than C3.
  1559. // 2) The two ranges are separated.
  1560. // 3) C1 ^ C2 is one-bit mask.
  1561. // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit mask.
  1562. // This implies all values in the two ranges differ by exactly one bit.
  1563. if ((LHSCC == ICmpInst::ICMP_ULT || LHSCC == ICmpInst::ICMP_ULE) &&
  1564. LHSCC == RHSCC && LHSCst && RHSCst && LHS->hasOneUse() &&
  1565. RHS->hasOneUse() && LHSCst->getType() == RHSCst->getType() &&
  1566. LHSCst->getValue() == (RHSCst->getValue())) {
  1567. Value *LAdd = LHS->getOperand(0);
  1568. Value *RAdd = RHS->getOperand(0);
  1569. Value *LAddOpnd, *RAddOpnd;
  1570. ConstantInt *LAddCst, *RAddCst;
  1571. if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddCst))) &&
  1572. match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddCst))) &&
  1573. LAddCst->getValue().ugt(LHSCst->getValue()) &&
  1574. RAddCst->getValue().ugt(LHSCst->getValue())) {
  1575. APInt DiffCst = LAddCst->getValue() ^ RAddCst->getValue();
  1576. if (LAddOpnd == RAddOpnd && DiffCst.isPowerOf2()) {
  1577. ConstantInt *MaxAddCst = nullptr;
  1578. if (LAddCst->getValue().ult(RAddCst->getValue()))
  1579. MaxAddCst = RAddCst;
  1580. else
  1581. MaxAddCst = LAddCst;
  1582. APInt RRangeLow = -RAddCst->getValue();
  1583. APInt RRangeHigh = RRangeLow + LHSCst->getValue();
  1584. APInt LRangeLow = -LAddCst->getValue();
  1585. APInt LRangeHigh = LRangeLow + LHSCst->getValue();
  1586. APInt LowRangeDiff = RRangeLow ^ LRangeLow;
  1587. APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
  1588. APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
  1589. : RRangeLow - LRangeLow;
  1590. if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
  1591. RangeDiff.ugt(LHSCst->getValue())) {
  1592. Value *MaskCst = ConstantInt::get(LAddCst->getType(), ~DiffCst);
  1593. Value *NewAnd = Builder->CreateAnd(LAddOpnd, MaskCst);
  1594. Value *NewAdd = Builder->CreateAdd(NewAnd, MaxAddCst);
  1595. return (Builder->CreateICmp(LHS->getPredicate(), NewAdd, LHSCst));
  1596. }
  1597. }
  1598. }
  1599. }
  1600. // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  1601. if (PredicatesFoldable(LHSCC, RHSCC)) {
  1602. if (LHS->getOperand(0) == RHS->getOperand(1) &&
  1603. LHS->getOperand(1) == RHS->getOperand(0))
  1604. LHS->swapOperands();
  1605. if (LHS->getOperand(0) == RHS->getOperand(0) &&
  1606. LHS->getOperand(1) == RHS->getOperand(1)) {
  1607. Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
  1608. unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
  1609. bool isSigned = LHS->isSigned() || RHS->isSigned();
  1610. return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
  1611. }
  1612. }
  1613. // handle (roughly):
  1614. // (icmp ne (A & B), C) | (icmp ne (A & D), E)
  1615. if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
  1616. return V;
  1617. Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
  1618. if (LHS->hasOneUse() || RHS->hasOneUse()) {
  1619. // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
  1620. // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
  1621. Value *A = nullptr, *B = nullptr;
  1622. if (LHSCC == ICmpInst::ICMP_EQ && LHSCst && LHSCst->isZero()) {
  1623. B = Val;
  1624. if (RHSCC == ICmpInst::ICMP_ULT && Val == RHS->getOperand(1))
  1625. A = Val2;
  1626. else if (RHSCC == ICmpInst::ICMP_UGT && Val == Val2)
  1627. A = RHS->getOperand(1);
  1628. }
  1629. // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1)
  1630. // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1)
  1631. else if (RHSCC == ICmpInst::ICMP_EQ && RHSCst && RHSCst->isZero()) {
  1632. B = Val2;
  1633. if (LHSCC == ICmpInst::ICMP_ULT && Val2 == LHS->getOperand(1))
  1634. A = Val;
  1635. else if (LHSCC == ICmpInst::ICMP_UGT && Val2 == Val)
  1636. A = LHS->getOperand(1);
  1637. }
  1638. if (A && B)
  1639. return Builder->CreateICmp(
  1640. ICmpInst::ICMP_UGE,
  1641. Builder->CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
  1642. }
  1643. // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
  1644. if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
  1645. return V;
  1646. // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
  1647. if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
  1648. return V;
  1649. // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  1650. if (!LHSCst || !RHSCst) return nullptr;
  1651. if (LHSCst == RHSCst && LHSCC == RHSCC) {
  1652. // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
  1653. if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
  1654. Value *NewOr = Builder->CreateOr(Val, Val2);
  1655. return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
  1656. }
  1657. }
  1658. // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
  1659. // iff C2 + CA == C1.
  1660. if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) {
  1661. ConstantInt *AddCst;
  1662. if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst))))
  1663. if (RHSCst->getValue() + AddCst->getValue() == LHSCst->getValue())
  1664. return Builder->CreateICmpULE(Val, LHSCst);
  1665. }
  1666. // From here on, we only handle:
  1667. // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
  1668. if (Val != Val2) return nullptr;
  1669. // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
  1670. if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
  1671. RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
  1672. LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
  1673. RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
  1674. return nullptr;
  1675. // We can't fold (ugt x, C) | (sgt x, C2).
  1676. if (!PredicatesFoldable(LHSCC, RHSCC))
  1677. return nullptr;
  1678. // Ensure that the larger constant is on the RHS.
  1679. bool ShouldSwap;
  1680. if (CmpInst::isSigned(LHSCC) ||
  1681. (ICmpInst::isEquality(LHSCC) &&
  1682. CmpInst::isSigned(RHSCC)))
  1683. ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
  1684. else
  1685. ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
  1686. if (ShouldSwap) {
  1687. std::swap(LHS, RHS);
  1688. std::swap(LHSCst, RHSCst);
  1689. std::swap(LHSCC, RHSCC);
  1690. }
  1691. // At this point, we know we have two icmp instructions
  1692. // comparing a value against two constants and or'ing the result
  1693. // together. Because of the above check, we know that we only have
  1694. // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
  1695. // icmp folding check above), that the two constants are not
  1696. // equal.
  1697. assert(LHSCst != RHSCst && "Compares not folded above?");
  1698. switch (LHSCC) {
  1699. default: llvm_unreachable("Unknown integer condition code!");
  1700. case ICmpInst::ICMP_EQ:
  1701. switch (RHSCC) {
  1702. default: llvm_unreachable("Unknown integer condition code!");
  1703. case ICmpInst::ICMP_EQ:
  1704. if (LHS->getOperand(0) == RHS->getOperand(0)) {
  1705. // if LHSCst and RHSCst differ only by one bit:
  1706. // (A == C1 || A == C2) -> (A & ~(C1 ^ C2)) == C1
  1707. assert(LHSCst->getValue().ule(LHSCst->getValue()));
  1708. APInt Xor = LHSCst->getValue() ^ RHSCst->getValue();
  1709. if (Xor.isPowerOf2()) {
  1710. Value *NegCst = Builder->getInt(~Xor);
  1711. Value *And = Builder->CreateAnd(LHS->getOperand(0), NegCst);
  1712. return Builder->CreateICmp(ICmpInst::ICMP_EQ, And, LHSCst);
  1713. }
  1714. }
  1715. if (LHSCst == SubOne(RHSCst)) {
  1716. // (X == 13 | X == 14) -> X-13 <u 2
  1717. Constant *AddCST = ConstantExpr::getNeg(LHSCst);
  1718. Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
  1719. AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
  1720. return Builder->CreateICmpULT(Add, AddCST);
  1721. }
  1722. break; // (X == 13 | X == 15) -> no change
  1723. case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
  1724. case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
  1725. break;
  1726. case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
  1727. case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
  1728. case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
  1729. return RHS;
  1730. }
  1731. break;
  1732. case ICmpInst::ICMP_NE:
  1733. switch (RHSCC) {
  1734. default: llvm_unreachable("Unknown integer condition code!");
  1735. case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
  1736. case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
  1737. case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
  1738. return LHS;
  1739. case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
  1740. case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
  1741. case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
  1742. return Builder->getTrue();
  1743. }
  1744. case ICmpInst::ICMP_ULT:
  1745. switch (RHSCC) {
  1746. default: llvm_unreachable("Unknown integer condition code!");
  1747. case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
  1748. break;
  1749. case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
  1750. // If RHSCst is [us]MAXINT, it is always false. Not handling
  1751. // this can cause overflow.
  1752. if (RHSCst->isMaxValue(false))
  1753. return LHS;
  1754. return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false);
  1755. case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
  1756. break;
  1757. case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
  1758. case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
  1759. return RHS;
  1760. case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
  1761. break;
  1762. }
  1763. break;
  1764. case ICmpInst::ICMP_SLT:
  1765. switch (RHSCC) {
  1766. default: llvm_unreachable("Unknown integer condition code!");
  1767. case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
  1768. break;
  1769. case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
  1770. // If RHSCst is [us]MAXINT, it is always false. Not handling
  1771. // this can cause overflow.
  1772. if (RHSCst->isMaxValue(true))
  1773. return LHS;
  1774. return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false);
  1775. case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
  1776. break;
  1777. case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
  1778. case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
  1779. return RHS;
  1780. case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
  1781. break;
  1782. }
  1783. break;
  1784. case ICmpInst::ICMP_UGT:
  1785. switch (RHSCC) {
  1786. default: llvm_unreachable("Unknown integer condition code!");
  1787. case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
  1788. case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
  1789. return LHS;
  1790. case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
  1791. break;
  1792. case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
  1793. case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
  1794. return Builder->getTrue();
  1795. case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
  1796. break;
  1797. }
  1798. break;
  1799. case ICmpInst::ICMP_SGT:
  1800. switch (RHSCC) {
  1801. default: llvm_unreachable("Unknown integer condition code!");
  1802. case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
  1803. case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
  1804. return LHS;
  1805. case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
  1806. break;
  1807. case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
  1808. case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
  1809. return Builder->getTrue();
  1810. case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
  1811. break;
  1812. }
  1813. break;
  1814. }
  1815. return nullptr;
  1816. }
  1817. /// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
  1818. /// instcombine, this returns a Value which should already be inserted into the
  1819. /// function.
  1820. Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
  1821. if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
  1822. RHS->getPredicate() == FCmpInst::FCMP_UNO &&
  1823. LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
  1824. if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
  1825. if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
  1826. // If either of the constants are nans, then the whole thing returns
  1827. // true.
  1828. if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
  1829. return Builder->getTrue();
  1830. // Otherwise, no need to compare the two constants, compare the
  1831. // rest.
  1832. return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
  1833. }
  1834. // Handle vector zeros. This occurs because the canonical form of
  1835. // "fcmp uno x,x" is "fcmp uno x, 0".
  1836. if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
  1837. isa<ConstantAggregateZero>(RHS->getOperand(1)))
  1838. return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
  1839. return nullptr;
  1840. }
  1841. Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
  1842. Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
  1843. FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
  1844. if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
  1845. // Swap RHS operands to match LHS.
  1846. Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
  1847. std::swap(Op1LHS, Op1RHS);
  1848. }
  1849. if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
  1850. // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
  1851. if (Op0CC == Op1CC)
  1852. return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
  1853. if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
  1854. return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
  1855. if (Op0CC == FCmpInst::FCMP_FALSE)
  1856. return RHS;
  1857. if (Op1CC == FCmpInst::FCMP_FALSE)
  1858. return LHS;
  1859. bool Op0Ordered;
  1860. bool Op1Ordered;
  1861. unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
  1862. unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
  1863. if (Op0Ordered == Op1Ordered) {
  1864. // If both are ordered or unordered, return a new fcmp with
  1865. // or'ed predicates.
  1866. return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
  1867. }
  1868. }
  1869. return nullptr;
  1870. }
  1871. /// FoldOrWithConstants - This helper function folds:
  1872. ///
  1873. /// ((A | B) & C1) | (B & C2)
  1874. ///
  1875. /// into:
  1876. ///
  1877. /// (A & C1) | B
  1878. ///
  1879. /// when the XOR of the two constants is "all ones" (-1).
  1880. Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
  1881. Value *A, Value *B, Value *C) {
  1882. ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
  1883. if (!CI1) return nullptr;
  1884. Value *V1 = nullptr;
  1885. ConstantInt *CI2 = nullptr;
  1886. if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return nullptr;
  1887. APInt Xor = CI1->getValue() ^ CI2->getValue();
  1888. if (!Xor.isAllOnesValue()) return nullptr;
  1889. if (V1 == A || V1 == B) {
  1890. Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
  1891. return BinaryOperator::CreateOr(NewOp, V1);
  1892. }
  1893. return nullptr;
  1894. }
  1895. /// \brief This helper function folds:
  1896. ///
  1897. /// ((A | B) & C1) ^ (B & C2)
  1898. ///
  1899. /// into:
  1900. ///
  1901. /// (A & C1) ^ B
  1902. ///
  1903. /// when the XOR of the two constants is "all ones" (-1).
  1904. Instruction *InstCombiner::FoldXorWithConstants(BinaryOperator &I, Value *Op,
  1905. Value *A, Value *B, Value *C) {
  1906. ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
  1907. if (!CI1)
  1908. return nullptr;
  1909. Value *V1 = nullptr;
  1910. ConstantInt *CI2 = nullptr;
  1911. if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2))))
  1912. return nullptr;
  1913. APInt Xor = CI1->getValue() ^ CI2->getValue();
  1914. if (!Xor.isAllOnesValue())
  1915. return nullptr;
  1916. if (V1 == A || V1 == B) {
  1917. Value *NewOp = Builder->CreateAnd(V1 == A ? B : A, CI1);
  1918. return BinaryOperator::CreateXor(NewOp, V1);
  1919. }
  1920. return nullptr;
  1921. }
// Visit an 'or' instruction and try a long, order-sensitive sequence of
// folds: generic simplification, demanded-bits, bswap matching, constant
// folds, select formation, De Morgan's law, and a catalog of bitwise
// identities.  Returns a replacement instruction, &I when the instruction
// was mutated in place, or nullptr when nothing changed.
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return ReplaceInstUsesWith(I, V);

  if (Value *V = SimplifyOrInst(Op0, Op1, DL, TLI, DT, AC))
    return ReplaceInstUsesWith(I, V);

  // (A&B)|(A&C) -> A&(B|C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (Value *V = SimplifyBSwap(I))
    return ReplaceInstUsesWith(I, V);

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    ConstantInt *C1 = nullptr; Value *X = nullptr;
    // (X & C1) | C2 --> (X | C2) & (C1|C2)
    // iff (C1 & C2) == 0.
    // NOTE(review): the guard below actually requires (C1 & C2) != 0, which
    // contradicts the comment above.  The fold itself is valid regardless of
    // overlap, so the check looks like a profitability condition rather than
    // a correctness one -- confirm intent before changing either.
    if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
        (RHS->getValue() & C1->getValue()) != 0 &&
        Op0->hasOneUse()) {
      Value *Or = Builder->CreateOr(X, RHS);
      Or->takeName(Op0);
      return BinaryOperator::CreateAnd(Or,
                             Builder->getInt(RHS->getValue() | C1->getValue()));
    }

    // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
    if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
        Op0->hasOneUse()) {
      Value *Or = Builder->CreateOr(X, RHS);
      Or->takeName(Op0);
      return BinaryOperator::CreateXor(Or,
                            Builder->getInt(C1->getValue() & ~RHS->getValue()));
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  Value *A = nullptr, *B = nullptr;
  ConstantInt *C1 = nullptr, *C2 = nullptr;

  // (A | B) | C  and  A | (B | C)                  -> bswap if possible.
  // (A >> B) | (C << D)  and  (A << B) | (B >> C)  -> bswap if possible.
  if (match(Op0, m_Or(m_Value(), m_Value())) ||
      match(Op1, m_Or(m_Value(), m_Value())) ||
      (match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
       match(Op1, m_LogicalShift(m_Value(), m_Value())))) {
    if (Instruction *BSwap = MatchBSwap(I))
      return BSwap;
  }

  // (X^C)|Y -> (X|Y)^C iff Y&C == 0
  if (Op0->hasOneUse() &&
      match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
      MaskedValueIsZero(Op1, C1->getValue(), 0, &I)) {
    Value *NOr = Builder->CreateOr(A, Op1);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  // Y|(X^C) -> (X|Y)^C iff Y&C == 0
  if (Op1->hasOneUse() &&
      match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
      MaskedValueIsZero(Op0, C1->getValue(), 0, &I)) {
    Value *NOr = Builder->CreateOr(A, Op0);
    NOr->takeName(Op0);
    return BinaryOperator::CreateXor(NOr, C1);
  }

  // ((~A & B) | A) -> (A | B)
  if (match(Op0, m_And(m_Not(m_Value(A)), m_Value(B))) &&
      match(Op1, m_Specific(A)))
    return BinaryOperator::CreateOr(A, B);

  // ((A & B) | ~A) -> (~A | B)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      match(Op1, m_Not(m_Specific(A))))
    return BinaryOperator::CreateOr(Builder->CreateNot(A), B);

  // (A & (~B)) | (A ^ B) -> (A ^ B)
  if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_Xor(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateXor(A, B);

  // (A ^ B) | ( A & (~B)) -> (A ^ B)
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
      match(Op1, m_And(m_Specific(A), m_Not(m_Specific(B)))))
    return BinaryOperator::CreateXor(A, B);

  // (A & C)|(B & D)
  Value *C = nullptr, *D = nullptr;
  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_And(m_Value(B), m_Value(D)))) {
    Value *V1 = nullptr, *V2 = nullptr;
    C1 = dyn_cast<ConstantInt>(C);
    C2 = dyn_cast<ConstantInt>(D);
    if (C1 && C2) {  // (A & C1)|(B & C2)
      // The constant-mask folds below are only valid when the masks do not
      // overlap.
      if ((C1->getValue() & C2->getValue()) == 0) {
        // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
        // iff (C1&C2) == 0 and (N&~C1) == 0
        if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
            ((V1 == B &&
              MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N)
             (V2 == B &&
              MaskedValueIsZero(V1, ~C1->getValue(), 0, &I))))  // (N|V)
          return BinaryOperator::CreateAnd(A,
                                Builder->getInt(C1->getValue()|C2->getValue()));
        // Or commutes, try both ways.
        if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
            ((V1 == A &&
              MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N)
             (V2 == A &&
              MaskedValueIsZero(V1, ~C2->getValue(), 0, &I))))  // (N|V)
          return BinaryOperator::CreateAnd(B,
                                Builder->getInt(C1->getValue()|C2->getValue()));

        // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
        // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
        ConstantInt *C3 = nullptr, *C4 = nullptr;
        if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
            (C3->getValue() & ~C1->getValue()) == 0 &&
            match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
            (C4->getValue() & ~C2->getValue()) == 0) {
          V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
          return BinaryOperator::CreateAnd(V2,
                                Builder->getInt(C1->getValue()|C2->getValue()));
        }
      }
    }

    // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) ->  C0 ? A : B, and commuted variants.
    // Don't do this for vector select idioms, the code generator doesn't handle
    // them well yet.
    if (!I.getType()->isVectorTy()) {
      if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
        return Match;
      if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
        return Match;
      if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
        return Match;
      if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
        return Match;
    }

    // ((A&~B)|(~A&B)) -> A^B
    if ((match(C, m_Not(m_Specific(D))) &&
         match(B, m_Not(m_Specific(A)))))
      return BinaryOperator::CreateXor(A, D);
    // ((~B&A)|(~A&B)) -> A^B
    if ((match(A, m_Not(m_Specific(D))) &&
         match(B, m_Not(m_Specific(C)))))
      return BinaryOperator::CreateXor(C, D);
    // ((A&~B)|(B&~A)) -> A^B
    if ((match(C, m_Not(m_Specific(B))) &&
         match(D, m_Not(m_Specific(A)))))
      return BinaryOperator::CreateXor(A, B);
    // ((~B&A)|(B&~A)) -> A^B
    if ((match(A, m_Not(m_Specific(B))) &&
         match(D, m_Not(m_Specific(C)))))
      return BinaryOperator::CreateXor(C, B);

    // ((A|B)&1)|(B&-2) -> (A&1) | B
    if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
        match(A, m_Or(m_Specific(B), m_Value(V1)))) {
      Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
      if (Ret) return Ret;
    }
    // (B&-2)|((A|B)&1) -> (A&1) | B
    if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
        match(B, m_Or(m_Value(V1), m_Specific(A)))) {
      Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
      if (Ret) return Ret;
    }
    // ((A^B)&1)|(B&-2) -> (A&1) ^ B
    if (match(A, m_Xor(m_Value(V1), m_Specific(B))) ||
        match(A, m_Xor(m_Specific(B), m_Value(V1)))) {
      Instruction *Ret = FoldXorWithConstants(I, Op1, V1, B, C);
      if (Ret) return Ret;
    }
    // (B&-2)|((A^B)&1) -> (A&1) ^ B
    if (match(B, m_Xor(m_Specific(A), m_Value(V1))) ||
        match(B, m_Xor(m_Value(V1), m_Specific(A)))) {
      Instruction *Ret = FoldXorWithConstants(I, Op0, A, V1, D);
      if (Ret) return Ret;
    }
  }

  // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
    if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
      if (Op1->hasOneUse() || cast<BinaryOperator>(Op1)->hasOneUse())
        return BinaryOperator::CreateOr(Op0, C);

  // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
  if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
    if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
      if (Op0->hasOneUse() || cast<BinaryOperator>(Op0)->hasOneUse())
        return BinaryOperator::CreateOr(Op1, C);

  // ((B | C) & A) | B -> B | (A & C)
  if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
    return BinaryOperator::CreateOr(Op1, Builder->CreateAnd(A, C));

  // (~A | ~B) == (~(A & B)) - De Morgan's Law
  if (Value *Op0NotVal = dyn_castNotVal(Op0))
    if (Value *Op1NotVal = dyn_castNotVal(Op1))
      if (Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
                                        I.getName()+".demorgan");
        return BinaryOperator::CreateNot(And);
      }

  // Canonicalize xor to the RHS.  The swap is undone before the icmp/fcmp
  // folds below, which want the original operand order.
  bool SwappedForXor = false;
  if (match(Op0, m_Xor(m_Value(), m_Value()))) {
    std::swap(Op0, Op1);
    SwappedForXor = true;
  }

  // A | ( A ^ B) -> A |  B
  // A | (~A ^ B) -> A | ~B
  // (A & B) | (A ^ B)
  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
    if (Op0 == A || Op0 == B)
      return BinaryOperator::CreateOr(A, B);

    if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
        match(Op0, m_And(m_Specific(B), m_Specific(A))))
      return BinaryOperator::CreateOr(A, B);

    if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
      Value *Not = Builder->CreateNot(B, B->getName()+".not");
      return BinaryOperator::CreateOr(Not, Op0);
    }
    if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
      Value *Not = Builder->CreateNot(A, A->getName()+".not");
      return BinaryOperator::CreateOr(Not, Op0);
    }
  }

  // A | ~(A | B) -> A | ~B
  // A | ~(A ^ B) -> A | ~B
  if (match(Op1, m_Not(m_Value(A))))
    if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
      if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
          Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
                               B->getOpcode() == Instruction::Xor)) {
        Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
                                                 B->getOperand(0);
        Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not");
        return BinaryOperator::CreateOr(Not, Op0);
      }

  // (A & B) | ((~A) ^ B) -> (~A ^ B)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      match(Op1, m_Xor(m_Not(m_Specific(A)), m_Specific(B))))
    return BinaryOperator::CreateXor(Builder->CreateNot(A), B);

  // ((~A) ^ B) | (A & B) -> (~A ^ B)
  if (match(Op0, m_Xor(m_Not(m_Value(A)), m_Value(B))) &&
      match(Op1, m_And(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateXor(Builder->CreateNot(A), B);

  if (SwappedForXor)
    std::swap(Op0, Op1);

  {
    ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
    ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
    if (LHS && RHS)
      if (Value *Res = FoldOrOfICmps(LHS, RHS, &I))
        return ReplaceInstUsesWith(I, Res);

    // TODO: Make this recursive; it's a little tricky because an arbitrary
    // number of 'or' instructions might have to be created.
    Value *X, *Y;
    if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res = FoldOrOfICmps(LHS, Cmp, &I))
          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, Y));
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = FoldOrOfICmps(LHS, Cmp, &I))
          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, X));
    }
    if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
      if (auto *Cmp = dyn_cast<ICmpInst>(X))
        if (Value *Res = FoldOrOfICmps(Cmp, RHS, &I))
          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, Y));
      if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = FoldOrOfICmps(Cmp, RHS, &I))
          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, X));
    }
  }

  // (fcmp uno x, c) | (fcmp uno y, c)  -> (fcmp uno x, y)
  if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
    if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
      if (Value *Res = FoldOrOfFCmps(LHS, RHS))
        return ReplaceInstUsesWith(I, Res);

  // fold (or (cast A), (cast B)) -> (cast (or A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    CastInst *Op1C = dyn_cast<CastInst>(Op1);
    if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
      Type *SrcTy = Op0C->getOperand(0)->getType();
      if (SrcTy == Op1C->getOperand(0)->getType() &&
          SrcTy->isIntOrIntVectorTy()) {
        Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);

        if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
            // Only do this if the casts both really cause code to be
            // generated.
            ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
          Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }

        // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
        // cast is otherwise not optimizable.  This happens for vector sexts.
        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
            if (Value *Res = FoldOrOfICmps(LHS, RHS, &I))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());

        // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
        // cast is otherwise not optimizable.  This happens for vector sexts.
        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
            if (Value *Res = FoldOrOfFCmps(LHS, RHS))
              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
      }
    }
  }

  // or(sext(A), B) -> A ? -1 : B where A is an i1
  // or(A, sext(B)) -> B ? -1 : A where B is an i1
  if (match(Op0, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1))
    return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
  if (match(Op1, m_SExt(m_Value(A))) && A->getType()->isIntegerTy(1))
    return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);

  // Note: If we've gotten to the point of visiting the outer OR, then the
  // inner one couldn't be simplified.  If it was a constant, then it won't
  // be simplified by a later pass either, so we try swapping the inner/outer
  // ORs in the hopes that we'll be able to simplify it this way.
  // (X|C) | V --> (X|V) | C
  if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
      match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
    Value *Inner = Builder->CreateOr(A, Op1);
    Inner->takeName(Op0);
    return BinaryOperator::CreateOr(Inner, C1);
  }

  // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
  // Since this OR statement hasn't been optimized further yet, we hope
  // that this transformation will allow the new ORs to be optimized.
  {
    Value *X = nullptr, *Y = nullptr;
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
        match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
      Value *orTrue = Builder->CreateOr(A, C);
      Value *orFalse = Builder->CreateOr(B, D);
      return SelectInst::Create(X, orTrue, orFalse);
    }
  }

  return Changed ? &I : nullptr;
}
// Visit an 'xor' instruction.  After generic simplification, handles the
// '~X' (xor with all-ones) special cases including De Morgan's laws,
// constant-RHS arithmetic rewrites, operand canonicalization against
// or/and on either side, a catalog of two-operand identities, icmp^icmp
// predicate merging, and cast hoisting.  Returns a replacement instruction,
// &I when mutated in place, or nullptr when nothing changed.
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return ReplaceInstUsesWith(I, V);

  if (Value *V = SimplifyXorInst(Op0, Op1, DL, TLI, DT, AC))
    return ReplaceInstUsesWith(I, V);

  // (A&B)^(A&C) -> A&(B^C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (Value *V = SimplifyBSwap(I))
    return ReplaceInstUsesWith(I, V);

  // Is this a ~ operation?
  if (Value *NotOp = dyn_castNotVal(&I)) {
    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
      if (Op0I->getOpcode() == Instruction::And ||
          Op0I->getOpcode() == Instruction::Or) {
        // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
        // ~(~X | Y) === (X & ~Y) - De Morgan's Law
        // Canonicalize a not'd operand to position 0 first.
        if (dyn_castNotVal(Op0I->getOperand(1)))
          Op0I->swapOperands();
        if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
          Value *NotY =
            Builder->CreateNot(Op0I->getOperand(1),
                               Op0I->getOperand(1)->getName()+".not");
          if (Op0I->getOpcode() == Instruction::And)
            return BinaryOperator::CreateOr(Op0NotVal, NotY);
          return BinaryOperator::CreateAnd(Op0NotVal, NotY);
        }

        // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
        // ~(X | Y) === (~X & ~Y) - De Morgan's Law
        // Only profitable when both inversions are free.
        if (IsFreeToInvert(Op0I->getOperand(0),
                           Op0I->getOperand(0)->hasOneUse()) &&
            IsFreeToInvert(Op0I->getOperand(1),
                           Op0I->getOperand(1)->hasOneUse())) {
          Value *NotX =
            Builder->CreateNot(Op0I->getOperand(0), "notlhs");
          Value *NotY =
            Builder->CreateNot(Op0I->getOperand(1), "notrhs");
          if (Op0I->getOpcode() == Instruction::And)
            return BinaryOperator::CreateOr(NotX, NotY);
          return BinaryOperator::CreateAnd(NotX, NotY);
        }

      } else if (Op0I->getOpcode() == Instruction::AShr) {
        // ~(~X >>s Y) --> (X >>s Y)
        if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
          return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
      }
    }
  }

  if (Constant *RHS = dyn_cast<Constant>(Op1)) {
    if (RHS->isAllOnesValue() && Op0->hasOneUse())
      // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
      if (CmpInst *CI = dyn_cast<CmpInst>(Op0))
        return CmpInst::Create(CI->getOpcode(),
                               CI->getInversePredicate(),
                               CI->getOperand(0), CI->getOperand(1));
  }

  if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
    // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
    if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
      if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
        if (CI->hasOneUse() && Op0C->hasOneUse()) {
          Instruction::CastOps Opcode = Op0C->getOpcode();
          if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
              (RHS == ConstantExpr::getCast(Opcode, Builder->getTrue(),
                                            Op0C->getDestTy()))) {
            // Invert the compare in place and re-extend it.
            CI->setPredicate(CI->getInversePredicate());
            return CastInst::Create(Opcode, CI, Op0C->getType());
          }
        }
      }
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
      // ~(c-X) == X-c-1 == X+(-c-1)
      if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
        if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
          Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
          Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
                                      ConstantInt::get(I.getType(), 1));
          return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
        }

      if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
        if (Op0I->getOpcode() == Instruction::Add) {
          // ~(X-c) --> (-c-1)-X
          if (RHS->isAllOnesValue()) {
            Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
            return BinaryOperator::CreateSub(
                           ConstantExpr::getSub(NegOp0CI,
                                      ConstantInt::get(I.getType(), 1)),
                                      Op0I->getOperand(0));
          } else if (RHS->getValue().isSignBit()) {
            // (X + C) ^ signbit -> (X + C + signbit)
            Constant *C = Builder->getInt(RHS->getValue() + Op0CI->getValue());
            return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
          }
        } else if (Op0I->getOpcode() == Instruction::Or) {
          // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
          if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue(),
                                0, &I)) {
            Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
            // Anything in both C1 and C2 is known to be zero, remove it from
            // NewRHS.
            Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
            NewRHS = ConstantExpr::getAnd(NewRHS,
                                          ConstantExpr::getNot(CommonBits));
            Worklist.Add(Op0I);
            I.setOperand(0, Op0I->getOperand(0));
            I.setOperand(1, NewRHS);
            return &I;
          }
        } else if (Op0I->getOpcode() == Instruction::LShr) {
          // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
          // E1 = "X ^ C1"
          BinaryOperator *E1;
          ConstantInt *C1;
          if (Op0I->hasOneUse() &&
              (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
              E1->getOpcode() == Instruction::Xor &&
              (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
            // fold (C1 >> C2) ^ C3
            ConstantInt *C2 = Op0CI, *C3 = RHS;
            APInt FoldConst = C1->getValue().lshr(C2->getValue());
            FoldConst ^= C3->getValue();
            // Prepare the two operands.
            Value *Opnd0 = Builder->CreateLShr(E1->getOperand(0), C2);
            Opnd0->takeName(Op0I);
            cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
            Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);

            return BinaryOperator::CreateXor(Opnd0, FoldVal);
          }
        }
      }
    }

    // Try to fold constant and into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;
    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  // Canonicalize by swapping so that the simpler folds below fire; note
  // these mutate both I and its or/and operand in place.
  BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
  if (Op1I) {
    Value *A, *B;
    if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
      if (A == Op0) {              // B^(B|A) == (A|B)^B
        Op1I->swapOperands();
        I.swapOperands();
        std::swap(Op0, Op1);
      } else if (B == Op0) {       // B^(A|B) == (A|B)^B
        I.swapOperands();     // Simplified below.
        std::swap(Op0, Op1);
      }
    } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
               Op1I->hasOneUse()){
      if (A == Op0) {                                      // A^(A&B) -> A^(B&A)
        Op1I->swapOperands();
        std::swap(A, B);
      }
      if (B == Op0) {                                      // A^(B&A) -> (B&A)^A
        I.swapOperands();     // Simplified below.
        std::swap(Op0, Op1);
      }
    }
  }

  BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
  if (Op0I) {
    Value *A, *B;
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        Op0I->hasOneUse()) {
      if (A == Op1)                                  // (B|A)^B == (A|B)^B
        std::swap(A, B);
      if (B == Op1)                                  // (A|B)^B == A & ~B
        return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1));
    } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
               Op0I->hasOneUse()){
      if (A == Op1)                                        // (A&B)^A -> (B&A)^A
        std::swap(A, B);
      if (B == Op1 &&                                      // (B&A)^A == ~B & A
          !isa<ConstantInt>(Op1)) {  // Canonical form is (B&C)^C
        return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1);
      }
    }
  }

  if (Op0I && Op1I) {
    Value *A, *B, *C, *D;
    // (A & B)^(A | B) -> A ^ B
    if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
        match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
      if ((A == C && B == D) || (A == D && B == C))
        return BinaryOperator::CreateXor(A, B);
    }
    // (A | B)^(A & B) -> A ^ B
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1I, m_And(m_Value(C), m_Value(D)))) {
      if ((A == C && B == D) || (A == D && B == C))
        return BinaryOperator::CreateXor(A, B);
    }
    // (A | ~B) ^ (~A | B) -> A ^ B
    if (match(Op0I, m_Or(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1I, m_Or(m_Not(m_Specific(A)), m_Specific(B)))) {
      return BinaryOperator::CreateXor(A, B);
    }
    // (~A | B) ^ (A | ~B) -> A ^ B
    if (match(Op0I, m_Or(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op1I, m_Or(m_Specific(A), m_Not(m_Specific(B))))) {
      return BinaryOperator::CreateXor(A, B);
    }
    // (A & ~B) ^ (~A & B) -> A ^ B
    if (match(Op0I, m_And(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1I, m_And(m_Not(m_Specific(A)), m_Specific(B)))) {
      return BinaryOperator::CreateXor(A, B);
    }
    // (~A & B) ^ (A & ~B) -> A ^ B
    if (match(Op0I, m_And(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op1I, m_And(m_Specific(A), m_Not(m_Specific(B))))) {
      return BinaryOperator::CreateXor(A, B);
    }
    // (A ^ C)^(A | B) -> ((~A) & B) ^ C
    if (match(Op0I, m_Xor(m_Value(D), m_Value(C))) &&
        match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
      if (D == A)
        return BinaryOperator::CreateXor(
            Builder->CreateAnd(Builder->CreateNot(A), B), C);
      if (D == B)
        return BinaryOperator::CreateXor(
            Builder->CreateAnd(Builder->CreateNot(B), A), C);
    }
    // (A | B)^(A ^ C) -> ((~A) & B) ^ C
    if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
        match(Op1I, m_Xor(m_Value(D), m_Value(C)))) {
      if (D == A)
        return BinaryOperator::CreateXor(
            Builder->CreateAnd(Builder->CreateNot(A), B), C);
      if (D == B)
        return BinaryOperator::CreateXor(
            Builder->CreateAnd(Builder->CreateNot(B), A), C);
    }
    // (A & B) ^ (A ^ B) -> (A | B)
    if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
        match(Op1I, m_Xor(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
    // (A ^ B) ^ (A & B) -> (A | B)
    if (match(Op0I, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op1I, m_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);
  }

  Value *A = nullptr, *B = nullptr;
  // (A & ~B) ^ (~A) -> ~(A & B)
  if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_Not(m_Specific(A))))
    return BinaryOperator::CreateNot(Builder->CreateAnd(A, B));

  // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
  if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
    if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
      if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
        if (LHS->getOperand(0) == RHS->getOperand(1) &&
            LHS->getOperand(1) == RHS->getOperand(0))
          LHS->swapOperands();
        if (LHS->getOperand(0) == RHS->getOperand(0) &&
            LHS->getOperand(1) == RHS->getOperand(1)) {
          // XOR the predicate bit-codes to get the combined predicate.
          Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
          unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
          bool isSigned = LHS->isSigned() || RHS->isSigned();
          return ReplaceInstUsesWith(I,
                                 getNewICmpValue(isSigned, Code, Op0, Op1,
                                                 Builder));
        }
      }

  // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
    if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
      if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
        Type *SrcTy = Op0C->getOperand(0)->getType();
        if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
            // Only do this if the casts both really cause code to be generated.
            ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
                               I.getType()) &&
            ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
                               I.getType())) {
          Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
                                            Op1C->getOperand(0), I.getName());
          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
        }
      }
  }

  return Changed ? &I : nullptr;
}