InstCombineCompares.cpp (168 KB)
  1. //===- InstCombineCompares.cpp --------------------------------------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file implements the visitICmp and visitFCmp functions.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "InstCombineInternal.h"
  14. #include "llvm/ADT/APSInt.h"
  15. #include "llvm/ADT/Statistic.h"
  16. #include "llvm/Analysis/ConstantFolding.h"
  17. #include "llvm/Analysis/InstructionSimplify.h"
  18. #include "llvm/Analysis/MemoryBuiltins.h"
  19. #include "llvm/IR/ConstantRange.h"
  20. #include "llvm/IR/DataLayout.h"
  21. #include "llvm/IR/GetElementPtrTypeIterator.h"
  22. #include "llvm/IR/IntrinsicInst.h"
  23. #include "llvm/IR/PatternMatch.h"
  24. #include "llvm/Support/CommandLine.h"
  25. #include "llvm/Support/Debug.h"
  26. #include "llvm/Analysis/TargetLibraryInfo.h"
  27. using namespace llvm;
  28. using namespace PatternMatch;
  29. #define DEBUG_TYPE "instcombine"
  30. // How many times is a select replaced by one of its operands?
  31. STATISTIC(NumSel, "Number of select opts");
  32. // Initialization Routines
  33. static ConstantInt *getOne(Constant *C) {
  34. return ConstantInt::get(cast<IntegerType>(C->getType()), 1);
  35. }
  36. static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
  37. return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
  38. }
  39. static bool HasAddOverflow(ConstantInt *Result,
  40. ConstantInt *In1, ConstantInt *In2,
  41. bool IsSigned) {
  42. if (!IsSigned)
  43. return Result->getValue().ult(In1->getValue());
  44. if (In2->isNegative())
  45. return Result->getValue().sgt(In1->getValue());
  46. return Result->getValue().slt(In1->getValue());
  47. }
  48. /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
  49. /// overflowed for this type.
  50. static bool AddWithOverflow(Constant *&Result, Constant *In1,
  51. Constant *In2, bool IsSigned = false) {
  52. Result = ConstantExpr::getAdd(In1, In2);
  53. if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
  54. for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
  55. Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
  56. if (HasAddOverflow(ExtractElement(Result, Idx),
  57. ExtractElement(In1, Idx),
  58. ExtractElement(In2, Idx),
  59. IsSigned))
  60. return true;
  61. }
  62. return false;
  63. }
  64. return HasAddOverflow(cast<ConstantInt>(Result),
  65. cast<ConstantInt>(In1), cast<ConstantInt>(In2),
  66. IsSigned);
  67. }
  68. static bool HasSubOverflow(ConstantInt *Result,
  69. ConstantInt *In1, ConstantInt *In2,
  70. bool IsSigned) {
  71. if (!IsSigned)
  72. return Result->getValue().ugt(In1->getValue());
  73. if (In2->isNegative())
  74. return Result->getValue().slt(In1->getValue());
  75. return Result->getValue().sgt(In1->getValue());
  76. }
  77. /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
  78. /// overflowed for this type.
  79. static bool SubWithOverflow(Constant *&Result, Constant *In1,
  80. Constant *In2, bool IsSigned = false) {
  81. Result = ConstantExpr::getSub(In1, In2);
  82. if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
  83. for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
  84. Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
  85. if (HasSubOverflow(ExtractElement(Result, Idx),
  86. ExtractElement(In1, Idx),
  87. ExtractElement(In2, Idx),
  88. IsSigned))
  89. return true;
  90. }
  91. return false;
  92. }
  93. return HasSubOverflow(cast<ConstantInt>(Result),
  94. cast<ConstantInt>(In1), cast<ConstantInt>(In2),
  95. IsSigned);
  96. }
  97. /// isSignBitCheck - Given an exploded icmp instruction, return true if the
  98. /// comparison only checks the sign bit. If it only checks the sign bit, set
  99. /// TrueIfSigned if the result of the comparison is true when the input value is
  100. /// signed.
  101. static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
  102. bool &TrueIfSigned) {
  103. switch (pred) {
  104. case ICmpInst::ICMP_SLT: // True if LHS s< 0
  105. TrueIfSigned = true;
  106. return RHS->isZero();
  107. case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
  108. TrueIfSigned = true;
  109. return RHS->isAllOnesValue();
  110. case ICmpInst::ICMP_SGT: // True if LHS s> -1
  111. TrueIfSigned = false;
  112. return RHS->isAllOnesValue();
  113. case ICmpInst::ICMP_UGT:
  114. // True if LHS u> RHS and RHS == high-bit-mask - 1
  115. TrueIfSigned = true;
  116. return RHS->isMaxValue(true);
  117. case ICmpInst::ICMP_UGE:
  118. // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
  119. TrueIfSigned = true;
  120. return RHS->getValue().isSignBit();
  121. default:
  122. return false;
  123. }
  124. }
  125. /// Returns true if the exploded icmp can be expressed as a signed comparison
  126. /// to zero and updates the predicate accordingly.
  127. /// The signedness of the comparison is preserved.
  128. static bool isSignTest(ICmpInst::Predicate &pred, const ConstantInt *RHS) {
  129. if (!ICmpInst::isSigned(pred))
  130. return false;
  131. if (RHS->isZero())
  132. return ICmpInst::isRelational(pred);
  133. if (RHS->isOne()) {
  134. if (pred == ICmpInst::ICMP_SLT) {
  135. pred = ICmpInst::ICMP_SLE;
  136. return true;
  137. }
  138. } else if (RHS->isAllOnesValue()) {
  139. if (pred == ICmpInst::ICMP_SGT) {
  140. pred = ICmpInst::ICMP_SGE;
  141. return true;
  142. }
  143. }
  144. return false;
  145. }
  146. // isHighOnes - Return true if the constant is of the form 1+0+.
  147. // This is the same as lowones(~X).
  148. static bool isHighOnes(const ConstantInt *CI) {
  149. return (~CI->getValue() + 1).isPowerOf2();
  150. }
  151. /// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
  152. /// set of known zero and one bits, compute the maximum and minimum values that
  153. /// could have the specified known zero and known one bits, returning them in
  154. /// min/max.
  155. static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
  156. const APInt& KnownOne,
  157. APInt& Min, APInt& Max) {
  158. assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
  159. KnownZero.getBitWidth() == Min.getBitWidth() &&
  160. KnownZero.getBitWidth() == Max.getBitWidth() &&
  161. "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  162. APInt UnknownBits = ~(KnownZero|KnownOne);
  163. // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  164. // bit if it is unknown.
  165. Min = KnownOne;
  166. Max = KnownOne|UnknownBits;
  167. if (UnknownBits.isNegative()) { // Sign bit is unknown
  168. Min.setBit(Min.getBitWidth()-1);
  169. Max.clearBit(Max.getBitWidth()-1);
  170. }
  171. }
  172. // ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
  173. // a set of known zero and one bits, compute the maximum and minimum values that
  174. // could have the specified known zero and known one bits, returning them in
  175. // min/max.
  176. static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
  177. const APInt &KnownOne,
  178. APInt &Min, APInt &Max) {
  179. assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
  180. KnownZero.getBitWidth() == Min.getBitWidth() &&
  181. KnownZero.getBitWidth() == Max.getBitWidth() &&
  182. "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  183. APInt UnknownBits = ~(KnownZero|KnownOne);
  184. // The minimum value is when the unknown bits are all zeros.
  185. Min = KnownOne;
  186. // The maximum value is when the unknown bits are all ones.
  187. Max = KnownOne|UnknownBits;
  188. }
  189. /// FoldCmpLoadFromIndexedGlobal - Called we see this pattern:
  190. /// cmp pred (load (gep GV, ...)), cmpcst
  191. /// where GV is a global variable with a constant initializer. Try to simplify
  192. /// this into some simple computation that does not need the load. For example
  193. /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
  194. ///
  195. /// If AndCst is non-null, then the loaded value is masked with that constant
  196. /// before doing the comparison. This handles cases like "A[i]&4 == 0".
  197. Instruction *InstCombiner::
  198. FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
  199. CmpInst &ICI, ConstantInt *AndCst) {
  200. Constant *Init = GV->getInitializer();
  201. if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
  202. return nullptr;
  203. uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  204. if (ArrayElementCount > 1024) return nullptr; // Don't blow up on huge arrays.
  205. // There are many forms of this optimization we can handle, for now, just do
  206. // the simple index into a single-dimensional array.
  207. //
  208. // Require: GEP GV, 0, i {{, constant indices}}
  209. if (GEP->getNumOperands() < 3 ||
  210. !isa<ConstantInt>(GEP->getOperand(1)) ||
  211. !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
  212. isa<Constant>(GEP->getOperand(2)))
  213. return nullptr;
  214. // Check that indices after the variable are constants and in-range for the
  215. // type they index. Collect the indices. This is typically for arrays of
  216. // structs.
  217. SmallVector<unsigned, 4> LaterIndices;
  218. Type *EltTy = Init->getType()->getArrayElementType();
  219. for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
  220. ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
  221. if (!Idx) return nullptr; // Variable index.
  222. uint64_t IdxVal = Idx->getZExtValue();
  223. if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
  224. if (StructType *STy = dyn_cast<StructType>(EltTy))
  225. EltTy = STy->getElementType(IdxVal);
  226. else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
  227. if (IdxVal >= ATy->getNumElements()) return nullptr;
  228. EltTy = ATy->getElementType();
  229. } else {
  230. return nullptr; // Unknown type.
  231. }
  232. LaterIndices.push_back(IdxVal);
  233. }
  234. enum { Overdefined = -3, Undefined = -2 };
  235. // Variables for our state machines.
  236. // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  237. // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  238. // and 87 is the second (and last) index. FirstTrueElement is -2 when
  239. // undefined, otherwise set to the first true element. SecondTrueElement is
  240. // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  241. int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
  242. // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  243. // form "i != 47 & i != 87". Same state transitions as for true elements.
  244. int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
  245. /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  246. /// define a state machine that triggers for ranges of values that the index
  247. /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  248. /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  249. /// index in the range (inclusive). We use -2 for undefined here because we
  250. /// use relative comparisons and don't want 0-1 to match -1.
  251. int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
  252. // MagicBitvector - This is a magic bitvector where we set a bit if the
  253. // comparison is true for element 'i'. If there are 64 elements or less in
  254. // the array, this will fully represent all the comparison results.
  255. uint64_t MagicBitvector = 0;
  256. // Scan the array and see if one of our patterns matches.
  257. Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  258. for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
  259. Constant *Elt = Init->getAggregateElement(i);
  260. if (!Elt) return nullptr;
  261. // If this is indexing an array of structures, get the structure element.
  262. if (!LaterIndices.empty())
  263. Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);
  264. // If the element is masked, handle it.
  265. if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
  266. // Find out if the comparison would be true or false for the i'th element.
  267. Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
  268. CompareRHS, DL, TLI);
  269. // If the result is undef for this element, ignore it.
  270. if (isa<UndefValue>(C)) {
  271. // Extend range state machines to cover this element in case there is an
  272. // undef in the middle of the range.
  273. if (TrueRangeEnd == (int)i-1)
  274. TrueRangeEnd = i;
  275. if (FalseRangeEnd == (int)i-1)
  276. FalseRangeEnd = i;
  277. continue;
  278. }
  279. // If we can't compute the result for any of the elements, we have to give
  280. // up evaluating the entire conditional.
  281. if (!isa<ConstantInt>(C)) return nullptr;
  282. // Otherwise, we know if the comparison is true or false for this element,
  283. // update our state machines.
  284. bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
  285. // State machine for single/double/range index comparison.
  286. if (IsTrueForElt) {
  287. // Update the TrueElement state machine.
  288. if (FirstTrueElement == Undefined)
  289. FirstTrueElement = TrueRangeEnd = i; // First true element.
  290. else {
  291. // Update double-compare state machine.
  292. if (SecondTrueElement == Undefined)
  293. SecondTrueElement = i;
  294. else
  295. SecondTrueElement = Overdefined;
  296. // Update range state machine.
  297. if (TrueRangeEnd == (int)i-1)
  298. TrueRangeEnd = i;
  299. else
  300. TrueRangeEnd = Overdefined;
  301. }
  302. } else {
  303. // Update the FalseElement state machine.
  304. if (FirstFalseElement == Undefined)
  305. FirstFalseElement = FalseRangeEnd = i; // First false element.
  306. else {
  307. // Update double-compare state machine.
  308. if (SecondFalseElement == Undefined)
  309. SecondFalseElement = i;
  310. else
  311. SecondFalseElement = Overdefined;
  312. // Update range state machine.
  313. if (FalseRangeEnd == (int)i-1)
  314. FalseRangeEnd = i;
  315. else
  316. FalseRangeEnd = Overdefined;
  317. }
  318. }
  319. // If this element is in range, update our magic bitvector.
  320. if (i < 64 && IsTrueForElt)
  321. MagicBitvector |= 1ULL << i;
  322. // If all of our states become overdefined, bail out early. Since the
  323. // predicate is expensive, only check it every 8 elements. This is only
  324. // really useful for really huge arrays.
  325. if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
  326. SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
  327. FalseRangeEnd == Overdefined)
  328. return nullptr;
  329. }
  330. // Now that we've scanned the entire array, emit our new comparison(s). We
  331. // order the state machines in complexity of the generated code.
  332. Value *Idx = GEP->getOperand(2);
  333. // If the index is larger than the pointer size of the target, truncate the
  334. // index down like the GEP would do implicitly. We don't have to do this for
  335. // an inbounds GEP because the index can't be out of range.
  336. if (!GEP->isInBounds()) {
  337. Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
  338. unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
  339. if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
  340. Idx = Builder->CreateTrunc(Idx, IntPtrTy);
  341. }
  342. // If the comparison is only true for one or two elements, emit direct
  343. // comparisons.
  344. if (SecondTrueElement != Overdefined) {
  345. // None true -> false.
  346. if (FirstTrueElement == Undefined)
  347. return ReplaceInstUsesWith(ICI, Builder->getFalse());
  348. Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
  349. // True for one element -> 'i == 47'.
  350. if (SecondTrueElement == Undefined)
  351. return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
  352. // True for two elements -> 'i == 47 | i == 72'.
  353. Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
  354. Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
  355. Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
  356. return BinaryOperator::CreateOr(C1, C2);
  357. }
  358. // If the comparison is only false for one or two elements, emit direct
  359. // comparisons.
  360. if (SecondFalseElement != Overdefined) {
  361. // None false -> true.
  362. if (FirstFalseElement == Undefined)
  363. return ReplaceInstUsesWith(ICI, Builder->getTrue());
  364. Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
  365. // False for one element -> 'i != 47'.
  366. if (SecondFalseElement == Undefined)
  367. return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
  368. // False for two elements -> 'i != 47 & i != 72'.
  369. Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
  370. Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
  371. Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
  372. return BinaryOperator::CreateAnd(C1, C2);
  373. }
  374. // If the comparison can be replaced with a range comparison for the elements
  375. // where it is true, emit the range check.
  376. if (TrueRangeEnd != Overdefined) {
  377. assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
  378. // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
  379. if (FirstTrueElement) {
  380. Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
  381. Idx = Builder->CreateAdd(Idx, Offs);
  382. }
  383. Value *End = ConstantInt::get(Idx->getType(),
  384. TrueRangeEnd-FirstTrueElement+1);
  385. return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  386. }
  387. // False range check.
  388. if (FalseRangeEnd != Overdefined) {
  389. assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
  390. // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
  391. if (FirstFalseElement) {
  392. Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
  393. Idx = Builder->CreateAdd(Idx, Offs);
  394. }
  395. Value *End = ConstantInt::get(Idx->getType(),
  396. FalseRangeEnd-FirstFalseElement);
  397. return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  398. }
  399. // If a magic bitvector captures the entire comparison state
  400. // of this load, replace it with computation that does:
  401. // ((magic_cst >> i) & 1) != 0
  402. {
  403. Type *Ty = nullptr;
  404. // Look for an appropriate type:
  405. // - The type of Idx if the magic fits
  406. // - The smallest fitting legal type if we have a DataLayout
  407. // - Default to i32
  408. if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
  409. Ty = Idx->getType();
  410. // HLSL Change Begins: Don't introduce use of i64 here.
  411. // TODO: Find a way to do this safely.
  412. //else
  413. // Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
  414. // Use i32 if index type was i16 and too small, for instance
  415. else if (ArrayElementCount <= 32)
  416. Ty = Builder->getInt32Ty();
  417. // HLSL Change Ends
  418. if (Ty) {
  419. Value *V = Builder->CreateIntCast(Idx, Ty, false);
  420. V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
  421. V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
  422. return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
  423. }
  424. // HLSL Change Begins: Generate 32-bit pattern for 64-bit case for now.
  425. else if (ArrayElementCount <= 64) {
  426. Ty = Builder->getInt32Ty();
  427. Value *V = Builder->CreateIntCast(Idx, Ty, false);
  428. Value *Cmp = Builder->CreateICmpULT(V, ConstantInt::get(Ty, 32));
  429. Value *Sel = Builder->CreateSelect(Cmp,
  430. ConstantInt::get(Ty, MagicBitvector & 0xFFFFFFFF),
  431. ConstantInt::get(Ty, (MagicBitvector >> 32) & 0xFFFFFFFF));
  432. Value *Shift = Builder->CreateAnd(V, ConstantInt::get(Ty, 0x1F));
  433. V = Builder->CreateShl(ConstantInt::get(Ty, 0x1), Shift);
  434. V = Builder->CreateAnd(Sel, V);
  435. return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
  436. }
  437. // HLSL Change Ends
  438. }
  439. return nullptr;
  440. }
/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
/// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
/// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
/// be complex, and scales are involved. The above expression would also be
/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This later form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// The transform only fires when the GEP has exactly one variable index and
/// every constant contribution is a multiple of that index's scale, so the
/// rescaled expression crosses zero at the same point as the raw offset.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        // Sequential (array/vector/pointer) index: scale by the allocation
        // size of the indexed element type. Note the index is sign-extended.
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
  // Fold the rescaled constant into the index: the result is "NewOffs + i".
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
}
/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
/// else. At this point we know that the GEP is on the LHS of the comparison.
///
/// \param GEPLHS the GEP appearing as the left operand of the compare.
/// \param RHS    the other operand (possibly another GEP).
/// \param Cond   the (unsigned or equality) predicate being evaluated.
/// \param I      the original icmp instruction, used for RAUW.
/// \returns a replacement instruction, or null if no fold applies.
Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    // The unsigned pointer compare becomes a signed compare of the offset
    // against zero.
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
            GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                         LOffset, ROffset);
        return ReplaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different, bail out.
      return nullptr;
    }

    // If one of the GEPs has all zero indices, recurse with the GEPs swapped
    // (and the predicate swapped to match).
    if (GEPLHS->hasAllZeroIndices())
      return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
                   GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return ReplaceInstUsesWith(I, // No comparison is needed here.
                             Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }
  return nullptr;
}
  651. /// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
  652. Instruction *InstCombiner::FoldICmpAddOpCst(Instruction &ICI,
  653. Value *X, ConstantInt *CI,
  654. ICmpInst::Predicate Pred) {
  655. // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  656. // so the values can never be equal. Similarly for all other "or equals"
  657. // operators.
  658. // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
  659. // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
  660. // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
  661. if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
  662. Value *R =
  663. ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
  664. return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  665. }
  666. // (X+1) >u X --> X <u (0-1) --> X != 255
  667. // (X+2) >u X --> X <u (0-2) --> X <u 254
  668. // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
  669. if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
  670. return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
  671. unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  672. ConstantInt *SMax = ConstantInt::get(X->getContext(),
  673. APInt::getSignedMaxValue(BitWidth));
  674. // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
  675. // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
  676. // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
  677. // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
  678. // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
  679. // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
  680. if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
  681. return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
  682. // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
  683. // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
  684. // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  685. // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  686. // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
  687. // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
  688. assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  689. Constant *C = Builder->getInt(CI->getValue()-1);
  690. return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
  691. }
/// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
/// and CmpRHS are both known to be integer constants.
///
/// The fold solves "X/C1 pred C2" for X by computing the half-open interval
/// of X values that satisfy the comparison, then emitting a range check on X
/// directly instead of performing the divide.
Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                                          ConstantInt *DivRHS) {
  ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
  const APInt &CmpRHSV = CmpRHS->getValue();

  // FIXME: If the operand types don't match the type of the divide
  // then don't attempt this transform. The code below doesn't have the
  // logic to deal with a signed divide and an unsigned compare (and
  // vice versa). This is because (x /s C1) <s C2  produces different
  // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
  // (x /u C1) <u C2.  Simply casting the operands and result won't
  // work. :(  The if statement below tests that condition and bails
  // if it finds it.
  bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
  if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
    return nullptr;
  if (DivRHS->isZero())
    return nullptr; // The ProdOV computation fails on divide by zero.
  if (DivIsSigned && DivRHS->isAllOnesValue())
    return nullptr; // The overflow computation also screws up here
  if (DivRHS->isOne()) {
    // This eliminates some funny cases with INT_MIN.
    ICI.setOperand(0, DivI->getOperand(0));   // X/1 == X.
    return &ICI;
  }

  // Compute Prod = CI * DivRHS. We are essentially solving an equation
  // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
  // C2 (CI). By solving for X we can turn this into a range check
  // instead of computing a divide.
  Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);

  // Determine if the product overflows by seeing if the product is
  // not equal to the divide. Make sure we do the same kind of divide
  // as in the LHS instruction that we're folding.
  bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
                 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;

  // Get the ICmp opcode
  ICmpInst::Predicate Pred = ICI.getPredicate();

  /// If the division is known to be exact, then there is no remainder from the
  /// divide, so the covered range size is unit, otherwise it is the divisor.
  ConstantInt *RangeSize = DivI->isExact() ? getOne(Prod) : DivRHS;

  // Figure out the interval that is being checked. For example, a comparison
  // like "X /u 5 == 0" is really checking that  X is in the interval [0, 5).
  // Compute this interval based on the constants involved and the signedness of
  // the compare/divide. This computes a half-open interval, keeping track of
  // whether either value in the interval overflows. After analysis each
  // overflow variable is set to 0 if it's corresponding bound variable is valid
  // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
  int LoOverflow = 0, HiOverflow = 0;
  Constant *LoBound = nullptr, *HiBound = nullptr;

  if (!DivIsSigned) {  // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow) {
      // If this is not an exact divide, then many values in the range collapse
      // to the same result value.
      HiOverflow = AddWithOverflow(HiBound, LoBound, RangeSize, false);
    }
  } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
    if (CmpRHSV == 0) {       // (X / pos) op 0
      // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
      LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
      HiBound = RangeSize;
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / pos) op pos
      LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
      HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = AddWithOverflow(HiBound, Prod, RangeSize, true);
    } else {                       // (X / pos) op neg
      // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
      HiBound = AddOne(Prod);
      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow) {
        ConstantInt *DivNeg =cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
        LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
      }
    }
  } else if (DivRHS->isNegative()) { // Divisor is < 0.
    if (DivI->isExact())
      RangeSize = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
    if (CmpRHSV == 0) {       // (X / neg) op 0
      // e.g. X/-5 op 0  --> [-4, 5)
      LoBound = AddOne(RangeSize);
      HiBound = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
      if (HiBound == DivRHS) {     // -INTMIN = INTMIN
        HiOverflow = 1;            // [INTMIN+1, overflow)
        HiBound = nullptr;         // e.g. X/INTMIN = 0 --> X > INTMIN
      }
    } else if (CmpRHSV.isStrictlyPositive()) {   // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiBound = AddOne(Prod);
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow = AddWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
    } else {                       // (X / neg) op neg
      LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
      LoOverflow = HiOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = SubWithOverflow(HiBound, Prod, RangeSize, true);
    }

    // Dividing by a negative swaps the condition.  LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Emit the range check on the original dividend X using the computed
  // [LoBound, HiBound) interval and its overflow flags.
  Value *X = DivI->getOperand(0);
  switch (Pred) {
  default: llvm_unreachable("Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, Builder->getFalse());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, HiBound);
    return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
                                                    DivIsSigned, true));
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return ReplaceInstUsesWith(ICI, Builder->getTrue());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, HiBound);
    return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
                                                    DivIsSigned, false));
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1)   // Low bound is greater than input range.
      return ReplaceInstUsesWith(ICI, Builder->getTrue());
    if (LoOverflow == -1)   // Low bound is less than input range.
      return ReplaceInstUsesWith(ICI, Builder->getFalse());
    return new ICmpInst(Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1)       // High bound greater than input range.
      return ReplaceInstUsesWith(ICI, Builder->getFalse());
    if (HiOverflow == -1)       // High bound less than input range.
      return ReplaceInstUsesWith(ICI, Builder->getTrue());
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
    return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
  }
}
/// FoldICmpShrCst - Handle "icmp(([al]shr X, cst1), cst2)".
///
/// Non-equality compares are rewritten as a udiv/sdiv by a power of two and
/// re-folded through FoldICmpDivCst; equality compares are either decided
/// outright (comparing against bits always shifted out) or strength-reduced
/// to a compare of a masked X against the unshifted constant.
Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
                                          ConstantInt *ShAmt) {
  const APInt &CmpRHSV = cast<ConstantInt>(ICI.getOperand(1))->getValue();

  // Check that the shift amount is in range. If not, don't perform
  // undefined shifts. When the shift is visited it will be
  // simplified.
  uint32_t TypeBits = CmpRHSV.getBitWidth();
  uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
    return nullptr;

  if (!ICI.isEquality()) {
    // If we have an unsigned comparison and an ashr, we can't simplify this.
    // Similarly for signed comparisons with lshr.
    if (ICI.isSigned() != (Shr->getOpcode() == Instruction::AShr))
      return nullptr;

    // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
    // by a power of 2.  Since we already have logic to simplify these,
    // transform to div and then simplify the resultant comparison.
    if (Shr->getOpcode() == Instruction::AShr &&
        (!Shr->isExact() || ShAmtVal == TypeBits - 1))
      return nullptr;

    // Revisit the shift (to delete it).
    Worklist.Add(Shr);

    Constant *DivCst =
      ConstantInt::get(Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));

    Value *Tmp =
      Shr->getOpcode() == Instruction::AShr ?
      Builder->CreateSDiv(Shr->getOperand(0), DivCst, "", Shr->isExact()) :
      Builder->CreateUDiv(Shr->getOperand(0), DivCst, "", Shr->isExact());

    ICI.setOperand(0, Tmp);

    // If the builder folded the binop, just return it.
    BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
    if (!TheDiv)
      return &ICI;

    // Otherwise, fold this div/compare.
    assert(TheDiv->getOpcode() == Instruction::SDiv ||
           TheDiv->getOpcode() == Instruction::UDiv);

    Instruction *Res = FoldICmpDivCst(ICI, TheDiv, cast<ConstantInt>(DivCst));
    assert(Res && "This div/cst should have folded!");
    return Res;
  }

  // If we are comparing against bits always shifted out, the
  // comparison cannot succeed.
  APInt Comp = CmpRHSV << ShAmtVal;
  ConstantInt *ShiftedCmpRHS = Builder->getInt(Comp);
  if (Shr->getOpcode() == Instruction::LShr)
    Comp = Comp.lshr(ShAmtVal);
  else
    Comp = Comp.ashr(ShAmtVal);

  if (Comp != CmpRHSV) { // Comparing against a bit that we know is zero.
    // Round-tripping the constant through the shift changed it, so no X can
    // satisfy the equality; fold to the constant answer.
    bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
    Constant *Cst = Builder->getInt1(IsICMP_NE);
    return ReplaceInstUsesWith(ICI, Cst);
  }

  // Otherwise, check to see if the bits shifted out are known to be zero.
  // If so, we can compare against the unshifted value:
  //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
  if (Shr->hasOneUse() && Shr->isExact())
    return new ICmpInst(ICI.getPredicate(), Shr->getOperand(0), ShiftedCmpRHS);

  if (Shr->hasOneUse()) {
    // Otherwise strength reduce the shift into an and.
    APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
    Constant *Mask = Builder->getInt(Val);

    Value *And = Builder->CreateAnd(Shr->getOperand(0),
                                    Mask, Shr->getName()+".mask");
    return new ICmpInst(ICI.getPredicate(), And, ShiftedCmpRHS);
  }
  return nullptr;
}
/// FoldICmpCstShrCst - Handle "(icmp eq/ne (ashr/lshr const2, A), const1)" ->
/// (icmp eq/ne A, Log2(const2/const1)) ->
/// (icmp eq/ne A, Log2(const2) - Log2(const1)).
///
/// \param Op  the shift expression (used only to distinguish ashr from lshr).
/// \param A   the variable shift amount.
/// \param CI1 const1, the value compared against.
/// \param CI2 const2, the constant being shifted.
Instruction *InstCombiner::FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
                                             ConstantInt *CI1,
                                             ConstantInt *CI2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  // Fold to a constant true/false, flipping the sense for 'ne'.
  auto getConstant = [&I, this](bool IsTrue) {
    if (I.getPredicate() == I.ICMP_NE)
      IsTrue = !IsTrue;
    return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
  };

  // Build a compare on A, inverting the predicate for 'ne'.
  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  APInt AP1 = CI1->getValue();
  APInt AP2 = CI2->getValue();

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2 == 0)
    return nullptr;
  bool IsAShr = isa<AShrOperator>(Op);
  if (IsAShr) {
    // Cases an arithmetic shift can never reach: shifting all-ones stays
    // all-ones, the sign is preserved, and the magnitude can only shrink.
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    // Equal constants: only a zero-bit shift can match.
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the highest bit that's set.
  int Shift;
  // Both the constants are negative, take their positive to calculate log.
  if (IsAShr && AP1.isNegative())
    // Get the ones' complement of AP2 and AP1 when computing the distance.
    Shift = (~AP2).logBase2() - (~AP1).logBase2();
  else
    Shift = AP2.logBase2() - AP1.logBase2();

  if (Shift > 0) {
    // Verify the candidate shift actually reproduces const1 before committing.
    if (IsAShr ? AP1 == AP2.ashr(Shift) : AP1 == AP2.lshr(Shift))
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
  }
  // Shifting const2 will never be equal to const1.
  return getConstant(false);
}
  961. /// FoldICmpCstShlCst - Handle "(icmp eq/ne (shl const2, A), const1)" ->
  962. /// (icmp eq/ne A, TrailingZeros(const1) - TrailingZeros(const2)).
  963. Instruction *InstCombiner::FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
  964. ConstantInt *CI1,
  965. ConstantInt *CI2) {
  966. assert(I.isEquality() && "Cannot fold icmp gt/lt");
  967. auto getConstant = [&I, this](bool IsTrue) {
  968. if (I.getPredicate() == I.ICMP_NE)
  969. IsTrue = !IsTrue;
  970. return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
  971. };
  972. auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
  973. if (I.getPredicate() == I.ICMP_NE)
  974. Pred = CmpInst::getInversePredicate(Pred);
  975. return new ICmpInst(Pred, LHS, RHS);
  976. };
  977. APInt AP1 = CI1->getValue();
  978. APInt AP2 = CI2->getValue();
  979. // Don't bother doing any work for cases which InstSimplify handles.
  980. if (AP2 == 0)
  981. return nullptr;
  982. unsigned AP2TrailingZeros = AP2.countTrailingZeros();
  983. if (!AP1 && AP2TrailingZeros != 0)
  984. return getICmp(I.ICMP_UGE, A,
  985. ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
  986. if (AP1 == AP2)
  987. return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
  988. // Get the distance between the lowest bits that are set.
  989. int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
  990. if (Shift > 0 && AP2.shl(Shift) == AP1)
  991. return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
  992. // Shifting const2 will never be equal to const1.
  993. return getConstant(false);
  994. }
/// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
///
/// LHSI is the instruction producing the compared value and RHS is the
/// integer constant it is compared against. Dispatches on LHSI's opcode
/// (trunc/xor/and/or/mul/shl/shr/div/sub/add, plus a trailing section for
/// equality compares against binops and intrinsics) and returns either a
/// replacement instruction, &ICI itself after mutating its operands in
/// place, or nullptr when no fold applies. NOTE(review): the checks within
/// each case are order-sensitive; see the PR17827 comment below.
Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                          Instruction *LHSI,
                                                          ConstantInt *RHS) {
  const APInt &RHSV = RHS->getValue();

  switch (LHSI->getOpcode()) {
  case Instruction::Trunc:
    if (ICI.isEquality() && LHSI->hasOneUse()) {
      // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
      // of the high bits truncated out of x are known.
      unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
               SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
      APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
      computeKnownBits(LHSI->getOperand(0), KnownZero, KnownOne, 0, &ICI);

      // If all the high bits are known, we can do this xform.
      if ((KnownZero | KnownOne).countLeadingOnes() >= SrcBits - DstBits) {
        // Pull in the high bits from known-ones set.
        APInt NewRHS = RHS->getValue().zext(SrcBits);
        NewRHS |= KnownOne & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
        return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
                            Builder->getInt(NewRHS));
      }
    }
    break;

  case Instruction::Xor:         // (icmp pred (xor X, XorCst), CI)
    if (ConstantInt *XorCst = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
      // If this is a comparison that tests the signbit (X < 0) or (x > -1),
      // fold the xor.
      if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
          (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
        Value *CompareVal = LHSI->getOperand(0);

        // If the sign bit of the XorCst is not set, there is no change to
        // the operation, just stop using the Xor.
        if (!XorCst->isNegative()) {
          ICI.setOperand(0, CompareVal);
          Worklist.Add(LHSI);
          return &ICI;
        }

        // Was the old condition true if the operand is positive?
        bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;

        // If so, the new one isn't.  (xor with a negative constant flips the
        // sign bit, so the sign test inverts.)
        isTrueIfPositive ^= true;

        if (isTrueIfPositive)
          return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
                              SubOne(RHS));
        else
          return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
                              AddOne(RHS));
      }

      if (LHSI->hasOneUse()) {
        // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
        if (!ICI.isEquality() && XorCst->getValue().isSignBit()) {
          const APInt &SignBit = XorCst->getValue();
          ICmpInst::Predicate Pred = ICI.isSigned()
                                         ? ICI.getUnsignedPredicate()
                                         : ICI.getSignedPredicate();
          return new ICmpInst(Pred, LHSI->getOperand(0),
                              Builder->getInt(RHSV ^ SignBit));
        }

        // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
        if (!ICI.isEquality() && XorCst->isMaxValue(true)) {
          const APInt &NotSignBit = XorCst->getValue();
          ICmpInst::Predicate Pred = ICI.isSigned()
                                         ? ICI.getUnsignedPredicate()
                                         : ICI.getSignedPredicate();
          // The xor also swaps which side is "larger", hence the swap.
          Pred = ICI.getSwappedPredicate(Pred);
          return new ICmpInst(Pred, LHSI->getOperand(0),
                              Builder->getInt(RHSV ^ NotSignBit));
        }
      }

      // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
      //   iff -C is a power of 2
      if (ICI.getPredicate() == ICmpInst::ICMP_UGT &&
          XorCst->getValue() == ~RHSV && (RHSV + 1).isPowerOf2())
        return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), XorCst);

      // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
      //   iff -C is a power of 2
      if (ICI.getPredicate() == ICmpInst::ICMP_ULT &&
          XorCst->getValue() == -RHSV && RHSV.isPowerOf2())
        return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), XorCst);
    }
    break;

  case Instruction::And:         // (icmp pred (and X, AndCst), RHS)
    if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
        LHSI->getOperand(0)->hasOneUse()) {
      ConstantInt *AndCst = cast<ConstantInt>(LHSI->getOperand(1));

      // If the LHS is an AND of a truncating cast, we can widen the
      // and/compare to be the input width without changing the value
      // produced, eliminating a cast.
      if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
        // We can do this transformation if either the AND constant does not
        // have its sign bit set or if it is an equality comparison.
        // Extending a relational comparison when we're checking the sign
        // bit would not work.
        if (ICI.isEquality() ||
            (!AndCst->isNegative() && RHSV.isNonNegative())) {
          Value *NewAnd =
            Builder->CreateAnd(Cast->getOperand(0),
                               ConstantExpr::getZExt(AndCst, Cast->getSrcTy()));
          NewAnd->takeName(LHSI);
          return new ICmpInst(ICI.getPredicate(), NewAnd,
                              ConstantExpr::getZExt(RHS, Cast->getSrcTy()));
        }
      }

      // If the LHS is an AND of a zext, and we have an equality compare, we can
      // shrink the and/compare to the smaller type, eliminating the cast.
      if (ZExtInst *Cast = dyn_cast<ZExtInst>(LHSI->getOperand(0))) {
        IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
        // Make sure we don't compare the upper bits, SimplifyDemandedBits
        // should fold the icmp to true/false in that case.
        if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
          Value *NewAnd =
            Builder->CreateAnd(Cast->getOperand(0),
                               ConstantExpr::getTrunc(AndCst, Ty));
          NewAnd->takeName(LHSI);
          return new ICmpInst(ICI.getPredicate(), NewAnd,
                              ConstantExpr::getTrunc(RHS, Ty));
        }
      }

      // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
      // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
      // happens a LOT in code produced by the C front-end, for bitfield
      // access.
      BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
      if (Shift && !Shift->isShift())
        Shift = nullptr;

      ConstantInt *ShAmt;
      ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : nullptr;

      // This seemingly simple opportunity to fold away a shift turns out to
      // be rather complicated. See PR17827
      // ( http://llvm.org/bugs/show_bug.cgi?id=17827 ) for details.
      if (ShAmt) {
        bool CanFold = false;
        unsigned ShiftOpcode = Shift->getOpcode();
        if (ShiftOpcode == Instruction::AShr) {
          // There may be some constraints that make this possible,
          // but nothing simple has been discovered yet.
          CanFold = false;
        } else if (ShiftOpcode == Instruction::Shl) {
          // For a left shift, we can fold if the comparison is not signed.
          // We can also fold a signed comparison if the mask value and
          // comparison value are not negative. These constraints may not be
          // obvious, but we can prove that they are correct using an SMT
          // solver.
          if (!ICI.isSigned() || (!AndCst->isNegative() && !RHS->isNegative()))
            CanFold = true;
        } else if (ShiftOpcode == Instruction::LShr) {
          // For a logical right shift, we can fold if the comparison is not
          // signed. We can also fold a signed comparison if the shifted mask
          // value and the shifted comparison value are not negative.
          // These constraints may not be obvious, but we can prove that they
          // are correct using an SMT solver.
          if (!ICI.isSigned())
            CanFold = true;
          else {
            ConstantInt *ShiftedAndCst =
              cast<ConstantInt>(ConstantExpr::getShl(AndCst, ShAmt));
            ConstantInt *ShiftedRHSCst =
              cast<ConstantInt>(ConstantExpr::getShl(RHS, ShAmt));

            if (!ShiftedAndCst->isNegative() && !ShiftedRHSCst->isNegative())
              CanFold = true;
          }
        }

        if (CanFold) {
          Constant *NewCst;
          if (ShiftOpcode == Instruction::Shl)
            NewCst = ConstantExpr::getLShr(RHS, ShAmt);
          else
            NewCst = ConstantExpr::getShl(RHS, ShAmt);

          // Check to see if we are shifting out any of the bits being
          // compared.  (Shifting NewCst back must round-trip to RHS.)
          if (ConstantExpr::get(ShiftOpcode, NewCst, ShAmt) != RHS) {
            // If we shifted bits out, the fold is not going to work out.
            // As a special case, check to see if this means that the
            // result is always true or false now.
            if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
              return ReplaceInstUsesWith(ICI, Builder->getFalse());
            if (ICI.getPredicate() == ICmpInst::ICMP_NE)
              return ReplaceInstUsesWith(ICI, Builder->getTrue());
          } else {
            // Rewrite the compare and the AND in place; the shift becomes
            // dead and is queued for revisiting.
            ICI.setOperand(1, NewCst);
            Constant *NewAndCst;
            if (ShiftOpcode == Instruction::Shl)
              NewAndCst = ConstantExpr::getLShr(AndCst, ShAmt);
            else
              NewAndCst = ConstantExpr::getShl(AndCst, ShAmt);
            LHSI->setOperand(1, NewAndCst);
            LHSI->setOperand(0, Shift->getOperand(0));
            Worklist.Add(Shift); // Shift is dead.
            return &ICI;
          }
        }
      }

      // Turn ((X >> Y) & C) == 0  into  (X & (C << Y)) == 0.  The later is
      // preferable because it allows the C<<Y expression to be hoisted out
      // of a loop if Y is invariant and X is not.
      if (Shift && Shift->hasOneUse() && RHSV == 0 &&
          ICI.isEquality() && !Shift->isArithmeticShift() &&
          !isa<Constant>(Shift->getOperand(0))) {
        // Compute C << Y.
        Value *NS;
        if (Shift->getOpcode() == Instruction::LShr) {
          NS = Builder->CreateShl(AndCst, Shift->getOperand(1));
        } else {
          // Insert a logical shift.
          NS = Builder->CreateLShr(AndCst, Shift->getOperand(1));
        }

        // Compute X & (C << Y).
        Value *NewAnd =
          Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());

        ICI.setOperand(0, NewAnd);
        return &ICI;
      }

      // (icmp pred (and (or (lshr X, Y), X), 1), 0) -->
      //    (icmp pred (and X, (or (shl 1, Y), 1), 0))
      //
      // iff pred isn't signed
      {
        Value *X, *Y, *LShr;
        if (!ICI.isSigned() && RHSV == 0) {
          if (match(LHSI->getOperand(1), m_One())) {
            Constant *One = cast<Constant>(LHSI->getOperand(1));
            Value *Or = LHSI->getOperand(0);
            if (match(Or, m_Or(m_Value(LShr), m_Value(X))) &&
                match(LShr, m_LShr(m_Specific(X), m_Value(Y)))) {
              // Count how many instructions the rewrite would kill; only
              // profitable if we don't add more instructions than we remove.
              unsigned UsesRemoved = 0;
              if (LHSI->hasOneUse())
                ++UsesRemoved;
              if (Or->hasOneUse())
                ++UsesRemoved;
              if (LShr->hasOneUse())
                ++UsesRemoved;
              Value *NewOr = nullptr;
              // Compute X & ((1 << Y) | 1)
              if (auto *C = dyn_cast<Constant>(Y)) {
                // Constant Y: the new mask folds, so removing one use pays.
                if (UsesRemoved >= 1)
                  NewOr =
                      ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
              } else {
                // Non-constant Y: we emit two new instructions, so all three
                // old ones must die.
                if (UsesRemoved >= 3)
                  NewOr = Builder->CreateOr(Builder->CreateShl(One, Y,
                                                               LShr->getName(),
                                                               /*HasNUW=*/true),
                                            One, Or->getName());
              }
              if (NewOr) {
                Value *NewAnd = Builder->CreateAnd(X, NewOr, LHSI->getName());
                ICI.setOperand(0, NewAnd);
                return &ICI;
              }
            }
          }
        }
      }

      // Replace ((X & AndCst) > RHSV) with ((X & AndCst) != 0), if any
      // bit set in (X & AndCst) will produce a result greater than RHSV.
      if (ICI.getPredicate() == ICmpInst::ICMP_UGT) {
        unsigned NTZ = AndCst->getValue().countTrailingZeros();
        if ((NTZ < AndCst->getBitWidth()) &&
            APInt::getOneBitSet(AndCst->getBitWidth(), NTZ).ugt(RHSV))
          return new ICmpInst(ICmpInst::ICMP_NE, LHSI,
                              Constant::getNullValue(RHS->getType()));
      }
    }

    // Try to optimize things like "A[i]&42 == 0" to index computations.
    if (LoadInst *LI = dyn_cast<LoadInst>(LHSI->getOperand(0))) {
      if (GetElementPtrInst *GEP =
            dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
        if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
          if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
              !LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
            ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
            if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, ICI, C))
              return Res;
          }
    }

    // X & -C == -C -> X >  u ~C
    // X & -C != -C -> X <= u ~C
    //   iff C is a power of 2
    if (ICI.isEquality() && RHS == LHSI->getOperand(1) && (-RHSV).isPowerOf2())
      return new ICmpInst(
          ICI.getPredicate() == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_UGT
                                                  : ICmpInst::ICMP_ULE,
          LHSI->getOperand(0), SubOne(RHS));
    break;

  case Instruction::Or: {
    if (!ICI.isEquality() || !RHS->isNullValue() || !LHSI->hasOneUse())
      break;
    Value *P, *Q;
    if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
      // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
      // -> and (icmp eq P, null), (icmp eq Q, null).
      Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
                                        Constant::getNullValue(P->getType()));
      Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
                                        Constant::getNullValue(Q->getType()));
      Instruction *Op;
      if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
        Op = BinaryOperator::CreateAnd(ICIP, ICIQ);
      else
        Op = BinaryOperator::CreateOr(ICIP, ICIQ);
      return Op;
    }
    break;
  }

  case Instruction::Mul: {       // (icmp pred (mul X, Val), CI)
    ConstantInt *Val = dyn_cast<ConstantInt>(LHSI->getOperand(1));
    if (!Val) break;

    // If this is a signed comparison to 0 and the mul is sign preserving,
    // use the mul LHS operand instead.  A negative multiplier flips the
    // sense of the comparison.
    ICmpInst::Predicate pred = ICI.getPredicate();
    if (isSignTest(pred, RHS) && !Val->isZero() &&
        cast<BinaryOperator>(LHSI)->hasNoSignedWrap())
      return new ICmpInst(Val->isNegative() ?
                          ICmpInst::getSwappedPredicate(pred) : pred,
                          LHSI->getOperand(0),
                          Constant::getNullValue(RHS->getType()));

    break;
  }

  case Instruction::Shl: {       // (icmp pred (shl X, ShAmt), CI)
    uint32_t TypeBits = RHSV.getBitWidth();
    ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
    if (!ShAmt) {
      Value *X;
      // (1 << X) pred P2 -> X pred Log2(P2)
      if (match(LHSI, m_Shl(m_One(), m_Value(X)))) {
        bool RHSVIsPowerOf2 = RHSV.isPowerOf2();
        ICmpInst::Predicate Pred = ICI.getPredicate();
        if (ICI.isUnsigned()) {
          if (!RHSVIsPowerOf2) {
            // (1 << X) <  30 -> X <= 4
            // (1 << X) <= 30 -> X <= 4
            // (1 << X) >= 30 -> X >  4
            // (1 << X) >  30 -> X >  4
            if (Pred == ICmpInst::ICMP_ULT)
              Pred = ICmpInst::ICMP_ULE;
            else if (Pred == ICmpInst::ICMP_UGE)
              Pred = ICmpInst::ICMP_UGT;
          }
          unsigned RHSLog2 = RHSV.logBase2();

          // (1 << X) >= 2147483648 -> X >= 31 -> X == 31
          // (1 << X) <  2147483648 -> X <  31 -> X != 31
          if (RHSLog2 == TypeBits - 1) {
            if (Pred == ICmpInst::ICMP_UGE)
              Pred = ICmpInst::ICMP_EQ;
            else if (Pred == ICmpInst::ICMP_ULT)
              Pred = ICmpInst::ICMP_NE;
          }

          return new ICmpInst(Pred, X,
                              ConstantInt::get(RHS->getType(), RHSLog2));
        } else if (ICI.isSigned()) {
          if (RHSV.isAllOnesValue()) {
            // (1 << X) <= -1 -> X == 31
            if (Pred == ICmpInst::ICMP_SLE)
              return new ICmpInst(ICmpInst::ICMP_EQ, X,
                                  ConstantInt::get(RHS->getType(), TypeBits - 1));

            // (1 << X) > -1 -> X != 31
            if (Pred == ICmpInst::ICMP_SGT)
              return new ICmpInst(ICmpInst::ICMP_NE, X,
                                  ConstantInt::get(RHS->getType(), TypeBits - 1));
          } else if (!RHSV) {
            // (1 << X) <  0 -> X == 31
            // (1 << X) <= 0 -> X == 31
            if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
              return new ICmpInst(ICmpInst::ICMP_EQ, X,
                                  ConstantInt::get(RHS->getType(), TypeBits - 1));

            // (1 << X) >= 0 -> X != 31
            // (1 << X) >  0 -> X != 31
            if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
              return new ICmpInst(ICmpInst::ICMP_NE, X,
                                  ConstantInt::get(RHS->getType(), TypeBits - 1));
          }
        } else if (ICI.isEquality()) {
          if (RHSVIsPowerOf2)
            return new ICmpInst(
                Pred, X, ConstantInt::get(RHS->getType(), RHSV.logBase2()));
        }
      }
      break;
    }

    // Check that the shift amount is in range.  If not, don't perform
    // undefined shifts.  When the shift is visited it will be
    // simplified.
    if (ShAmt->uge(TypeBits))
      break;

    if (ICI.isEquality()) {
      // If we are comparing against bits always shifted out, the
      // comparison cannot succeed.
      Constant *Comp =
        ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
                             ShAmt);
      if (Comp != RHS) {// Comparing against a bit that we know is zero.
        bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
        Constant *Cst = Builder->getInt1(IsICMP_NE);
        return ReplaceInstUsesWith(ICI, Cst);
      }

      // If the shift is NUW, then it is just shifting out zeros, no need for an
      // AND.
      if (cast<BinaryOperator>(LHSI)->hasNoUnsignedWrap())
        return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
                            ConstantExpr::getLShr(RHS, ShAmt));

      // If the shift is NSW and we compare to 0, then it is just shifting out
      // sign bits, no need for an AND either.
      if (cast<BinaryOperator>(LHSI)->hasNoSignedWrap() && RHSV == 0)
        return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
                            ConstantExpr::getLShr(RHS, ShAmt));

      if (LHSI->hasOneUse()) {
        // Otherwise strength reduce the shift into an and.
        uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
        Constant *Mask = Builder->getInt(APInt::getLowBitsSet(TypeBits,
                                                          TypeBits - ShAmtVal));

        Value *And =
          Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName() + ".mask");
        return new ICmpInst(ICI.getPredicate(), And,
                            ConstantExpr::getLShr(RHS, ShAmt));
      }
    }

    // If this is a signed comparison to 0 and the shift is sign preserving,
    // use the shift LHS operand instead.
    ICmpInst::Predicate pred = ICI.getPredicate();
    if (isSignTest(pred, RHS) &&
        cast<BinaryOperator>(LHSI)->hasNoSignedWrap())
      return new ICmpInst(pred,
                          LHSI->getOperand(0),
                          Constant::getNullValue(RHS->getType()));

    // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
    bool TrueIfSigned = false;
    if (LHSI->hasOneUse() &&
        isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
      // (X << 31) <s 0  --> (X&1) != 0
      Constant *Mask = ConstantInt::get(LHSI->getOperand(0)->getType(),
                                        APInt::getOneBitSet(TypeBits,
                                            TypeBits - ShAmt->getZExtValue() - 1));
      Value *And =
        Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName() + ".mask");
      return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                          And, Constant::getNullValue(And->getType()));
    }
#if 0 // HLSL Change Begins: Disable optimization, it introduces new bitwidths
    // Transform (icmp pred iM (shl iM %v, N), CI)
    // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (CI>>N))
    // Transform the shl to a trunc if (trunc (CI>>N)) has no loss and M-N.
    // This enables to get rid of the shift in favor of a trunc which can be
    // free on the target. It has the additional benefit of comparing to a
    // smaller constant, which will be target friendly.
    unsigned Amt = ShAmt->getLimitedValue(TypeBits - 1);
    if (LHSI->hasOneUse() &&
        Amt != 0 && RHSV.countTrailingZeros() >= Amt) {
      Type *NTy = IntegerType::get(ICI.getContext(), TypeBits - Amt);
      Constant *NCI = ConstantExpr::getTrunc(
                        ConstantExpr::getAShr(RHS,
                          ConstantInt::get(RHS->getType(), Amt)),
                        NTy);
      return new ICmpInst(ICI.getPredicate(),
                          Builder->CreateTrunc(LHSI->getOperand(0), NTy),
                          NCI);
    }
#endif // HLSL Change Ends

    break;
  }

  case Instruction::LShr:         // (icmp pred (shr X, ShAmt), CI)
  case Instruction::AShr: {
    // Handle equality comparisons of shift-by-constant.
    BinaryOperator *BO = cast<BinaryOperator>(LHSI);
    if (ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
      if (Instruction *Res = FoldICmpShrCst(ICI, BO, ShAmt))
        return Res;
    }

    // Handle exact shr's.  An exact shift drops no bits, so the compare can
    // look through it when the RHS is zero.
    if (ICI.isEquality() && BO->isExact() && BO->hasOneUse()) {
      if (RHSV.isMinValue())
        return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), RHS);
    }
    break;
  }

  case Instruction::SDiv:
  case Instruction::UDiv:
    // Fold: icmp pred ([us]div X, C1), C2 -> range test
    // Fold this div into the comparison, producing a range check.
    // Determine, based on the divide type, what the range is being
    // checked.  If there is an overflow on the low or high side, remember
    // it, otherwise compute the range [low, hi) bounding the new value.
    // See: InsertRangeTest above for the kinds of replacements possible.
    if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
      if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
                                          DivRHS))
        return R;
    break;

  case Instruction::Sub: {
    ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(0));
    if (!LHSC) break;
    const APInt &LHSV = LHSC->getValue();

    // C1-X <u C2 -> (X|(C2-1)) == C1
    //   iff C1 & (C2-1) == C2-1
    //       C2 is a power of 2
    if (ICI.getPredicate() == ICmpInst::ICMP_ULT && LHSI->hasOneUse() &&
        RHSV.isPowerOf2() && (LHSV & (RHSV - 1)) == (RHSV - 1))
      return new ICmpInst(ICmpInst::ICMP_EQ,
                          Builder->CreateOr(LHSI->getOperand(1), RHSV - 1),
                          LHSC);

    // C1-X >u C2 -> (X|C2) != C1
    //   iff C1 & C2 == C2
    //       C2+1 is a power of 2
    if (ICI.getPredicate() == ICmpInst::ICMP_UGT && LHSI->hasOneUse() &&
        (RHSV + 1).isPowerOf2() && (LHSV & RHSV) == RHSV)
      return new ICmpInst(ICmpInst::ICMP_NE,
                          Builder->CreateOr(LHSI->getOperand(1), RHSV), LHSC);
    break;
  }

  case Instruction::Add:
    // Fold: icmp pred (add X, C1), C2
    if (!ICI.isEquality()) {
      ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
      if (!LHSC) break;
      const APInt &LHSV = LHSC->getValue();

      // Shift the compared-against range down by C1; if an endpoint lands on
      // the signed/unsigned minimum, the compare simplifies to one bound.
      ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
                             .subtract(LHSV);

      if (ICI.isSigned()) {
        if (CR.getLower().isSignBit()) {
          return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
                              Builder->getInt(CR.getUpper()));
        } else if (CR.getUpper().isSignBit()) {
          return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
                              Builder->getInt(CR.getLower()));
        }
      } else {
        if (CR.getLower().isMinValue()) {
          return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
                              Builder->getInt(CR.getUpper()));
        } else if (CR.getUpper().isMinValue()) {
          return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
                              Builder->getInt(CR.getLower()));
        }
      }

      // X-C1 <u C2 -> (X & -C2) == C1
      //   iff C1 & (C2-1) == 0
      //       C2 is a power of 2
      if (ICI.getPredicate() == ICmpInst::ICMP_ULT && LHSI->hasOneUse() &&
          RHSV.isPowerOf2() && (LHSV & (RHSV - 1)) == 0)
        return new ICmpInst(ICmpInst::ICMP_EQ,
                            Builder->CreateAnd(LHSI->getOperand(0), -RHSV),
                            ConstantExpr::getNeg(LHSC));

      // X-C1 >u C2 -> (X & ~C2) != C1
      //   iff C1 & C2 == 0
      //       C2+1 is a power of 2
      if (ICI.getPredicate() == ICmpInst::ICMP_UGT && LHSI->hasOneUse() &&
          (RHSV + 1).isPowerOf2() && (LHSV & RHSV) == 0)
        return new ICmpInst(ICmpInst::ICMP_NE,
                            Builder->CreateAnd(LHSI->getOperand(0), ~RHSV),
                            ConstantExpr::getNeg(LHSC));
    }
    break;
  }

  // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
  if (ICI.isEquality()) {
    bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;

    // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
    // the second operand is a constant, simplify a bit.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
      switch (BO->getOpcode()) {
      case Instruction::SRem:
        // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
        if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) && BO->hasOneUse()) {
          const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
          if (V.sgt(1) && V.isPowerOf2()) {
            Value *NewRem =
              Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
                                  BO->getName());
            return new ICmpInst(ICI.getPredicate(), NewRem,
                                Constant::getNullValue(BO->getType()));
          }
        }
        break;

      case Instruction::Add:
        // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
        if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          if (BO->hasOneUse())
            return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                                ConstantExpr::getSub(RHS, BOp1C));
        } else if (RHSV == 0) {
          // Replace ((add A, B) != 0) with (A != -B) if A or B is
          // efficiently invertible, or if the add has just this one use.
          Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);

          if (Value *NegVal = dyn_castNegVal(BOp1))
            return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
          if (Value *NegVal = dyn_castNegVal(BOp0))
            return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
          if (BO->hasOneUse()) {
            Value *Neg = Builder->CreateNeg(BOp1);
            Neg->takeName(BO);
            return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
          }
        }
        break;

      case Instruction::Xor:
        // For the xor case, we can xor two constants together, eliminating
        // the explicit xor.
        if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              ConstantExpr::getXor(RHS, BOC));
        } else if (RHSV == 0) {
          // Replace ((xor A, B) != 0) with (A != B)
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              BO->getOperand(1));
        }
        break;

      case Instruction::Sub:
        // Replace ((sub A, B) != C) with (B != A-C) if A & C are constants.
        if (ConstantInt *BOp0C = dyn_cast<ConstantInt>(BO->getOperand(0))) {
          if (BO->hasOneUse())
            return new ICmpInst(ICI.getPredicate(), BO->getOperand(1),
                                ConstantExpr::getSub(BOp0C, RHS));
        } else if (RHSV == 0) {
          // Replace ((sub A, B) != 0) with (A != B)
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              BO->getOperand(1));
        }
        break;

      case Instruction::Or:
        // If bits are being or'd in that are not present in the constant we
        // are comparing against, then the comparison could never succeed!
        if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          Constant *NotCI = ConstantExpr::getNot(RHS);
          if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
            return ReplaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
        }
        break;

      case Instruction::And:
        if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          // If bits are being compared against that are and'd out, then the
          // comparison can never succeed!
          if ((RHSV & ~BOC->getValue()) != 0)
            return ReplaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));

          // If we have ((X & C) == C), turn it into ((X & C) != 0).
          if (RHS == BOC && RHSV.isPowerOf2())
            return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
                                ICmpInst::ICMP_NE, LHSI,
                                Constant::getNullValue(RHS->getType()));

          // Don't perform the following transforms if the AND has multiple uses
          if (!BO->hasOneUse())
            break;

          // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
          if (BOC->getValue().isSignBit()) {
            Value *X = BO->getOperand(0);
            Constant *Zero = Constant::getNullValue(X->getType());
            ICmpInst::Predicate pred = isICMP_NE ?
              ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
            return new ICmpInst(pred, X, Zero);
          }

          // ((X & ~7) == 0) --> X < 8
          if (RHSV == 0 && isHighOnes(BOC)) {
            Value *X = BO->getOperand(0);
            Constant *NegX = ConstantExpr::getNeg(BOC);
            ICmpInst::Predicate pred = isICMP_NE ?
              ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
            return new ICmpInst(pred, X, NegX);
          }
        }
        break;

      case Instruction::Mul:
        if (RHSV == 0 && BO->hasNoSignedWrap()) {
          if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
            // The trivial case (mul X, 0) is handled by InstSimplify.
            // General case : (mul X, C) != 0 iff X != 0
            //                (mul X, C) == 0 iff X == 0
            if (!BOC->isZero())
              return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                                  Constant::getNullValue(RHS->getType()));
          }
        }
        break;
      default: break;
      }
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
      // Handle icmp {eq|ne} <intrinsic>, intcst.
      switch (II->getIntrinsicID()) {
      case Intrinsic::bswap:
        // bswap(A) ==/!= C  ->  A ==/!= bswap(C)
        Worklist.Add(II);
        ICI.setOperand(0, II->getArgOperand(0));
        ICI.setOperand(1, Builder->getInt(RHSV.byteSwap()));
        return &ICI;
      case Intrinsic::ctlz:
      case Intrinsic::cttz:
        // ctz(A) == bitwidth(a)  ->  A == 0 and likewise for !=
        if (RHSV == RHS->getType()->getBitWidth()) {
          Worklist.Add(II);
          ICI.setOperand(0, II->getArgOperand(0));
          ICI.setOperand(1, ConstantInt::get(RHS->getType(), 0));
          return &ICI;
        }
        break;
      case Intrinsic::ctpop:
        // popcount(A) == 0  ->  A == 0 and likewise for !=
        if (RHS->isZero()) {
          Worklist.Add(II);
          ICI.setOperand(0, II->getArgOperand(0));
          ICI.setOperand(1, RHS);
          return &ICI;
        }
        break;
      default:
        break;
      }
    }
  }
  return nullptr;
}
  1703. /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
  1704. /// We only handle extending casts so far.
  1705. ///
  1706. Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
  1707. const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
  1708. Value *LHSCIOp = LHSCI->getOperand(0);
  1709. Type *SrcTy = LHSCIOp->getType();
  1710. Type *DestTy = LHSCI->getType();
  1711. Value *RHSCIOp;
  1712. // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
  1713. // integer type is the same size as the pointer type.
  1714. if (LHSCI->getOpcode() == Instruction::PtrToInt &&
  1715. DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
  1716. Value *RHSOp = nullptr;
  1717. if (PtrToIntOperator *RHSC = dyn_cast<PtrToIntOperator>(ICI.getOperand(1))) {
  1718. Value *RHSCIOp = RHSC->getOperand(0);
  1719. if (RHSCIOp->getType()->getPointerAddressSpace() ==
  1720. LHSCIOp->getType()->getPointerAddressSpace()) {
  1721. RHSOp = RHSC->getOperand(0);
  1722. // If the pointer types don't match, insert a bitcast.
  1723. if (LHSCIOp->getType() != RHSOp->getType())
  1724. RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
  1725. }
  1726. } else if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1)))
  1727. RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
  1728. if (RHSOp)
  1729. return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
  1730. }
  1731. // The code below only handles extension cast instructions, so far.
  1732. // Enforce this.
  1733. if (LHSCI->getOpcode() != Instruction::ZExt &&
  1734. LHSCI->getOpcode() != Instruction::SExt)
  1735. return nullptr;
  1736. bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
  1737. bool isSignedCmp = ICI.isSigned();
  1738. if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
  1739. // Not an extension from the same type?
  1740. RHSCIOp = CI->getOperand(0);
  1741. if (RHSCIOp->getType() != LHSCIOp->getType())
  1742. return nullptr;
  1743. // If the signedness of the two casts doesn't agree (i.e. one is a sext
  1744. // and the other is a zext), then we can't handle this.
  1745. if (CI->getOpcode() != LHSCI->getOpcode())
  1746. return nullptr;
  1747. // Deal with equality cases early.
  1748. if (ICI.isEquality())
  1749. return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
  1750. // A signed comparison of sign extended values simplifies into a
  1751. // signed comparison.
  1752. if (isSignedCmp && isSignedExt)
  1753. return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
  1754. // The other three cases all fold into an unsigned comparison.
  1755. return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
  1756. }
  1757. // If we aren't dealing with a constant on the RHS, exit early
  1758. ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
  1759. if (!CI)
  1760. return nullptr;
  1761. // Compute the constant that would happen if we truncated to SrcTy then
  1762. // reextended to DestTy.
  1763. Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
  1764. Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
  1765. Res1, DestTy);
  1766. // If the re-extended constant didn't change...
  1767. if (Res2 == CI) {
  1768. // Deal with equality cases early.
  1769. if (ICI.isEquality())
  1770. return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
  1771. // A signed comparison of sign extended values simplifies into a
  1772. // signed comparison.
  1773. if (isSignedExt && isSignedCmp)
  1774. return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
  1775. // The other three cases all fold into an unsigned comparison.
  1776. return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1);
  1777. }
  1778. // The re-extended constant changed so the constant cannot be represented
  1779. // in the shorter type. Consequently, we cannot emit a simple comparison.
  1780. // All the cases that fold to true or false will have already been handled
  1781. // by SimplifyICmpInst, so only deal with the tricky case.
  1782. if (isSignedCmp || !isSignedExt)
  1783. return nullptr;
  1784. // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
  1785. // should have been folded away previously and not enter in here.
  1786. // We're performing an unsigned comp with a sign extended value.
  1787. // This is true if the input is >= 0. [aka >s -1]
  1788. Constant *NegOne = Constant::getAllOnesValue(SrcTy);
  1789. Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
  1790. // Finally, return the value computed.
  1791. if (ICI.getPredicate() == ICmpInst::ICMP_ULT)
  1792. return ReplaceInstUsesWith(ICI, Result);
  1793. assert(ICI.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
  1794. return BinaryOperator::CreateNot(Result);
  1795. }
/// ProcessUGT_ADDCST_ADD - The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
/// \param I   The ugt compare forming the range check.
/// \param A,B The operands of the inner (wide) add.
/// \param CI2 The bias constant added before the compare (must be 2^7/2^15/2^31).
/// \param CI1 The upper bound compared against.
/// \returns the extractvalue of the overflow bit to replace \p I with, or
///          null when the pattern does not match profitably.
static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse()) return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2()) return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  // NOTE(review): when CI1's width equals NewWidth we bail out — presumably
  // because such a compare would have been folded outright; confirm intent.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst) continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Module *M = I.getParent()->getParent()->getParent();

  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(M, Intrinsic::sadd_with_overflow,
                                       NewType);

  InstCombiner::BuilderTy *Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder->SetInsertPoint(OrigAdd);

  Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName()+".trunc");
  Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName()+".trunc");
  CallInst *Call = Builder->CreateCall(F, {TruncA, TruncB}, "sadd");
  // Element 0 of the aggregate is the narrow sum; element 1 is the overflow bit.
  Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.ReplaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}
/// \brief Try to simplify an arithmetic-with-overflow check of flavor \p OCF.
///
/// On success, stores the (possibly strength-reduced) arithmetic result in
/// \p Result and the known overflow bit in \p Overflow, and returns true.
/// Returns false when nothing could be proven.
bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
                                         Value *RHS, Instruction &OrigI,
                                         Value *&Result, Constant *&Overflow) {
  // Canonicalize a constant operand to the RHS for commutative operations so
  // the matchers below only need to look at RHS.
  if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
    std::swap(LHS, RHS);

  // Helper: fill in the out-parameters; optionally let the replacement value
  // take over OrigI's name. Always returns true so callers can `return SetResult(...)`.
  auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) {
    Result = OpResult;
    Overflow = OverflowVal;
    if (ReuseName)
      Result->takeName(&OrigI);
    return true;
  };

  switch (OCF) {
  case OCF_INVALID:
    llvm_unreachable("bad overflow check kind!");

  case OCF_UNSIGNED_ADD: {
    OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
    if (OR == OverflowResult::NeverOverflows)
      return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(),
                       true);

    if (OR == OverflowResult::AlwaysOverflows)
      return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true);
  }
  // FALL THROUGH uadd into sadd
  // (intentional: the X + 0 fold below applies to both flavors)
  case OCF_SIGNED_ADD: {
    // X + 0 -> {X, false}
    if (match(RHS, m_Zero()))
      return SetResult(LHS, Builder->getFalse(), false);

    // We can strength reduce this signed add into a regular add if we can prove
    // that it will never overflow.
    if (OCF == OCF_SIGNED_ADD)
      if (WillNotOverflowSignedAdd(LHS, RHS, OrigI))
        return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(),
                         true);
    break;
  }

  case OCF_UNSIGNED_SUB:
  case OCF_SIGNED_SUB: {
    // X - 0 -> {X, false}
    if (match(RHS, m_Zero()))
      return SetResult(LHS, Builder->getFalse(), false);

    if (OCF == OCF_SIGNED_SUB) {
      if (WillNotOverflowSignedSub(LHS, RHS, OrigI))
        return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(),
                         true);
    } else {
      if (WillNotOverflowUnsignedSub(LHS, RHS, OrigI))
        return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(),
                         true);
    }
    break;
  }

  case OCF_UNSIGNED_MUL: {
    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
    if (OR == OverflowResult::NeverOverflows)
      return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(),
                       true);
    if (OR == OverflowResult::AlwaysOverflows)
      return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true);
  } // FALL THROUGH (intentional: the folds below apply to both mul flavors)
  case OCF_SIGNED_MUL:
    // X * undef -> undef
    if (isa<UndefValue>(RHS))
      return SetResult(RHS, UndefValue::get(Builder->getInt1Ty()), false);

    // X * 0 -> {0, false}
    if (match(RHS, m_Zero()))
      return SetResult(RHS, Builder->getFalse(), false);

    // X * 1 -> {X, false}
    if (match(RHS, m_One()))
      return SetResult(LHS, Builder->getFalse(), false);

    if (OCF == OCF_SIGNED_MUL)
      if (WillNotOverflowSignedMul(LHS, RHS, OrigI))
        return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(),
                         true);
    break;
  }

  return false;
}
  1947. /// \brief Recognize and process idiom involving test for multiplication
  1948. /// overflow.
  1949. ///
  1950. /// The caller has matched a pattern of the form:
  1951. /// I = cmp u (mul(zext A, zext B), V
  1952. /// The function checks if this is a test for overflow and if so replaces
  1953. /// multiplication with call to 'mul.with.overflow' intrinsic.
  1954. ///
  1955. /// \param I Compare instruction.
  1956. /// \param MulVal Result of 'mult' instruction. It is one of the arguments of
  1957. /// the compare instruction. Must be of integer type.
  1958. /// \param OtherVal The other argument of compare instruction.
  1959. /// \returns Instruction which must replace the compare instruction, NULL if no
  1960. /// replacement required.
  1961. static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
  1962. Value *OtherVal, InstCombiner &IC) {
  1963. // Don't bother doing this transformation for pointers, don't do it for
  1964. // vectors.
  1965. if (!isa<IntegerType>(MulVal->getType()))
  1966. return nullptr;
  1967. assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
  1968. assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
  1969. Instruction *MulInstr = cast<Instruction>(MulVal);
  1970. assert(MulInstr->getOpcode() == Instruction::Mul);
  1971. auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
  1972. *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
  1973. assert(LHS->getOpcode() == Instruction::ZExt);
  1974. assert(RHS->getOpcode() == Instruction::ZExt);
  1975. Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
  1976. // Calculate type and width of the result produced by mul.with.overflow.
  1977. Type *TyA = A->getType(), *TyB = B->getType();
  1978. unsigned WidthA = TyA->getPrimitiveSizeInBits(),
  1979. WidthB = TyB->getPrimitiveSizeInBits();
  1980. unsigned MulWidth;
  1981. Type *MulType;
  1982. if (WidthB > WidthA) {
  1983. MulWidth = WidthB;
  1984. MulType = TyB;
  1985. } else {
  1986. MulWidth = WidthA;
  1987. MulType = TyA;
  1988. }
  1989. // In order to replace the original mul with a narrower mul.with.overflow,
  1990. // all uses must ignore upper bits of the product. The number of used low
  1991. // bits must be not greater than the width of mul.with.overflow.
  1992. if (MulVal->hasNUsesOrMore(2))
  1993. for (User *U : MulVal->users()) {
  1994. if (U == &I)
  1995. continue;
  1996. if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
  1997. // Check if truncation ignores bits above MulWidth.
  1998. unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
  1999. if (TruncWidth > MulWidth)
  2000. return nullptr;
  2001. } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
  2002. // Check if AND ignores bits above MulWidth.
  2003. if (BO->getOpcode() != Instruction::And)
  2004. return nullptr;
  2005. if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
  2006. const APInt &CVal = CI->getValue();
  2007. if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
  2008. return nullptr;
  2009. }
  2010. } else {
  2011. // Other uses prohibit this transformation.
  2012. return nullptr;
  2013. }
  2014. }
  2015. // Recognize patterns
  2016. switch (I.getPredicate()) {
  2017. case ICmpInst::ICMP_EQ:
  2018. case ICmpInst::ICMP_NE:
  2019. // Recognize pattern:
  2020. // mulval = mul(zext A, zext B)
  2021. // cmp eq/neq mulval, zext trunc mulval
  2022. if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
  2023. if (Zext->hasOneUse()) {
  2024. Value *ZextArg = Zext->getOperand(0);
  2025. if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
  2026. if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
  2027. break; //Recognized
  2028. }
  2029. // Recognize pattern:
  2030. // mulval = mul(zext A, zext B)
  2031. // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
  2032. ConstantInt *CI;
  2033. Value *ValToMask;
  2034. if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
  2035. if (ValToMask != MulVal)
  2036. return nullptr;
  2037. const APInt &CVal = CI->getValue() + 1;
  2038. if (CVal.isPowerOf2()) {
  2039. unsigned MaskWidth = CVal.logBase2();
  2040. if (MaskWidth == MulWidth)
  2041. break; // Recognized
  2042. }
  2043. }
  2044. return nullptr;
  2045. case ICmpInst::ICMP_UGT:
  2046. // Recognize pattern:
  2047. // mulval = mul(zext A, zext B)
  2048. // cmp ugt mulval, max
  2049. if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
  2050. APInt MaxVal = APInt::getMaxValue(MulWidth);
  2051. MaxVal = MaxVal.zext(CI->getBitWidth());
  2052. if (MaxVal.eq(CI->getValue()))
  2053. break; // Recognized
  2054. }
  2055. return nullptr;
  2056. case ICmpInst::ICMP_UGE:
  2057. // Recognize pattern:
  2058. // mulval = mul(zext A, zext B)
  2059. // cmp uge mulval, max+1
  2060. if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
  2061. APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
  2062. if (MaxVal.eq(CI->getValue()))
  2063. break; // Recognized
  2064. }
  2065. return nullptr;
  2066. case ICmpInst::ICMP_ULE:
  2067. // Recognize pattern:
  2068. // mulval = mul(zext A, zext B)
  2069. // cmp ule mulval, max
  2070. if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
  2071. APInt MaxVal = APInt::getMaxValue(MulWidth);
  2072. MaxVal = MaxVal.zext(CI->getBitWidth());
  2073. if (MaxVal.eq(CI->getValue()))
  2074. break; // Recognized
  2075. }
  2076. return nullptr;
  2077. case ICmpInst::ICMP_ULT:
  2078. // Recognize pattern:
  2079. // mulval = mul(zext A, zext B)
  2080. // cmp ule mulval, max + 1
  2081. if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
  2082. APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
  2083. if (MaxVal.eq(CI->getValue()))
  2084. break; // Recognized
  2085. }
  2086. return nullptr;
  2087. default:
  2088. return nullptr;
  2089. }
  2090. InstCombiner::BuilderTy *Builder = IC.Builder;
  2091. Builder->SetInsertPoint(MulInstr);
  2092. Module *M = I.getParent()->getParent()->getParent();
  2093. // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
  2094. Value *MulA = A, *MulB = B;
  2095. if (WidthA < MulWidth)
  2096. MulA = Builder->CreateZExt(A, MulType);
  2097. if (WidthB < MulWidth)
  2098. MulB = Builder->CreateZExt(B, MulType);
  2099. Value *F =
  2100. Intrinsic::getDeclaration(M, Intrinsic::umul_with_overflow, MulType);
  2101. CallInst *Call = Builder->CreateCall(F, {MulA, MulB}, "umul");
  2102. IC.Worklist.Add(MulInstr);
  2103. // If there are uses of mul result other than the comparison, we know that
  2104. // they are truncation or binary AND. Change them to use result of
  2105. // mul.with.overflow and adjust properly mask/size.
  2106. if (MulVal->hasNUsesOrMore(2)) {
  2107. Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value");
  2108. for (User *U : MulVal->users()) {
  2109. if (U == &I || U == OtherVal)
  2110. continue;
  2111. if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
  2112. if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
  2113. IC.ReplaceInstUsesWith(*TI, Mul);
  2114. else
  2115. TI->setOperand(0, Mul);
  2116. } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
  2117. assert(BO->getOpcode() == Instruction::And);
  2118. // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
  2119. ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
  2120. APInt ShortMask = CI->getValue().trunc(MulWidth);
  2121. Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask);
  2122. Instruction *Zext =
  2123. cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
  2124. IC.Worklist.Add(Zext);
  2125. IC.ReplaceInstUsesWith(*BO, Zext);
  2126. } else {
  2127. llvm_unreachable("Unexpected Binary operation");
  2128. }
  2129. IC.Worklist.Add(cast<Instruction>(U));
  2130. }
  2131. }
  2132. if (isa<Instruction>(OtherVal))
  2133. IC.Worklist.Add(cast<Instruction>(OtherVal));
  2134. // The original icmp gets replaced with the overflow value, maybe inverted
  2135. // depending on predicate.
  2136. bool Inverse = false;
  2137. switch (I.getPredicate()) {
  2138. case ICmpInst::ICMP_NE:
  2139. break;
  2140. case ICmpInst::ICMP_EQ:
  2141. Inverse = true;
  2142. break;
  2143. case ICmpInst::ICMP_UGT:
  2144. case ICmpInst::ICMP_UGE:
  2145. if (I.getOperand(0) == MulVal)
  2146. break;
  2147. Inverse = true;
  2148. break;
  2149. case ICmpInst::ICMP_ULT:
  2150. case ICmpInst::ICMP_ULE:
  2151. if (I.getOperand(1) == MulVal)
  2152. break;
  2153. Inverse = true;
  2154. break;
  2155. default:
  2156. llvm_unreachable("Unexpected predicate");
  2157. }
  2158. if (Inverse) {
  2159. Value *Res = Builder->CreateExtractValue(Call, 1);
  2160. return BinaryOperator::CreateNot(Res);
  2161. }
  2162. return ExtractValueInst::Create(Call, 1);
  2163. }
  2164. // DemandedBitsLHSMask - When performing a comparison against a constant,
  2165. // it is possible that not all the bits in the LHS are demanded. This helper
  2166. // method computes the mask that IS demanded.
  2167. static APInt DemandedBitsLHSMask(ICmpInst &I,
  2168. unsigned BitWidth, bool isSignCheck) {
  2169. if (isSignCheck)
  2170. return APInt::getSignBit(BitWidth);
  2171. ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
  2172. if (!CI) return APInt::getAllOnesValue(BitWidth);
  2173. const APInt &RHS = CI->getValue();
  2174. switch (I.getPredicate()) {
  2175. // For a UGT comparison, we don't care about any bits that
  2176. // correspond to the trailing ones of the comparand. The value of these
  2177. // bits doesn't impact the outcome of the comparison, because any value
  2178. // greater than the RHS must differ in a bit higher than these due to carry.
  2179. case ICmpInst::ICMP_UGT: {
  2180. unsigned trailingOnes = RHS.countTrailingOnes();
  2181. APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingOnes);
  2182. return ~lowBitsSet;
  2183. }
  2184. // Similarly, for a ULT comparison, we don't care about the trailing zeros.
  2185. // Any value less than the RHS must differ in a higher bit because of carries.
  2186. case ICmpInst::ICMP_ULT: {
  2187. unsigned trailingZeros = RHS.countTrailingZeros();
  2188. APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingZeros);
  2189. return ~lowBitsSet;
  2190. }
  2191. default:
  2192. return APInt::getAllOnesValue(BitWidth);
  2193. }
  2194. }
  2195. /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
  2196. /// should be swapped.
  2197. /// The decision is based on how many times these two operands are reused
  2198. /// as subtract operands and their positions in those instructions.
  2199. /// The rational is that several architectures use the same instruction for
  2200. /// both subtract and cmp, thus it is better if the order of those operands
  2201. /// match.
  2202. /// \return true if Op0 and Op1 should be swapped.
  2203. static bool swapMayExposeCSEOpportunities(const Value * Op0,
  2204. const Value * Op1) {
  2205. // Filter out pointer value as those cannot appears directly in subtract.
  2206. // FIXME: we may want to go through inttoptrs or bitcasts.
  2207. if (Op0->getType()->isPointerTy())
  2208. return false;
  2209. // Count every uses of both Op0 and Op1 in a subtract.
  2210. // Each time Op0 is the first operand, count -1: swapping is bad, the
  2211. // subtract has already the same layout as the compare.
  2212. // Each time Op0 is the second operand, count +1: swapping is good, the
  2213. // subtract has a different layout as the compare.
  2214. // At the end, if the benefit is greater than 0, Op0 should come second to
  2215. // expose more CSE opportunities.
  2216. int GlobalSwapBenefits = 0;
  2217. for (const User *U : Op0->users()) {
  2218. const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
  2219. if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
  2220. continue;
  2221. // If Op0 is the first argument, this is not beneficial to swap the
  2222. // arguments.
  2223. int LocalSwapBenefits = -1;
  2224. unsigned Op1Idx = 1;
  2225. if (BinOp->getOperand(Op1Idx) == Op0) {
  2226. Op1Idx = 0;
  2227. LocalSwapBenefits = 1;
  2228. }
  2229. if (BinOp->getOperand(Op1Idx) != Op1)
  2230. continue;
  2231. GlobalSwapBenefits += LocalSwapBenefits;
  2232. }
  2233. return GlobalSwapBenefits > 0;
  2234. }
  2235. /// \brief Check that one use is in the same block as the definition and all
  2236. /// other uses are in blocks dominated by a given block
  2237. ///
  2238. /// \param DI Definition
  2239. /// \param UI Use
  2240. /// \param DB Block that must dominate all uses of \p DI outside
  2241. /// the parent block
  2242. /// \return true when \p UI is the only use of \p DI in the parent block
  2243. /// and all other uses of \p DI are in blocks dominated by \p DB.
  2244. ///
  2245. bool InstCombiner::dominatesAllUses(const Instruction *DI,
  2246. const Instruction *UI,
  2247. const BasicBlock *DB) const {
  2248. assert(DI && UI && "Instruction not defined\n");
  2249. // ignore incomplete definitions
  2250. if (!DI->getParent())
  2251. return false;
  2252. // DI and UI must be in the same block
  2253. if (DI->getParent() != UI->getParent())
  2254. return false;
  2255. // Protect from self-referencing blocks
  2256. if (DI->getParent() == DB)
  2257. return false;
  2258. // DominatorTree available?
  2259. if (!DT)
  2260. return false;
  2261. for (const User *U : DI->users()) {
  2262. auto *Usr = cast<Instruction>(U);
  2263. if (Usr != UI && !DT->dominates(DB, Usr->getParent()))
  2264. return false;
  2265. }
  2266. return true;
  2267. }
  2268. ///
  2269. /// true when the instruction sequence within a block is select-cmp-br.
  2270. ///
  2271. static bool isChainSelectCmpBranch(const SelectInst *SI) {
  2272. const BasicBlock *BB = SI->getParent();
  2273. if (!BB)
  2274. return false;
  2275. auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
  2276. if (!BI || BI->getNumSuccessors() != 2)
  2277. return false;
  2278. auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
  2279. if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
  2280. return false;
  2281. return true;
  2282. }
  2283. ///
  2284. /// \brief True when a select result is replaced by one of its operands
  2285. /// in select-icmp sequence. This will eventually result in the elimination
  2286. /// of the select.
  2287. ///
  2288. /// \param SI Select instruction
  2289. /// \param Icmp Compare instruction
  2290. /// \param SIOpd Operand that replaces the select
  2291. ///
  2292. /// Notes:
  2293. /// - The replacement is global and requires dominator information
  2294. /// - The caller is responsible for the actual replacement
  2295. ///
  2296. /// Example:
  2297. ///
  2298. /// entry:
  2299. /// %4 = select i1 %3, %C* %0, %C* null
  2300. /// %5 = icmp eq %C* %4, null
  2301. /// br i1 %5, label %9, label %7
  2302. /// ...
  2303. /// ; <label>:7 ; preds = %entry
  2304. /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
  2305. /// ...
  2306. ///
  2307. /// can be transformed to
  2308. ///
  2309. /// %5 = icmp eq %C* %0, null
  2310. /// %6 = select i1 %3, i1 %5, i1 true
  2311. /// br i1 %6, label %9, label %7
  2312. /// ...
  2313. /// ; <label>:7 ; preds = %entry
  2314. /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
  2315. ///
  2316. /// Similar when the first operand of the select is a constant or/and
  2317. /// the compare is for not equal rather than equal.
  2318. ///
  2319. /// NOTE: The function is only called when the select and compare constants
  2320. /// are equal, the optimization can work only for EQ predicates. This is not a
  2321. /// major restriction since a NE compare should be 'normalized' to an equal
  2322. /// compare, which usually happens in the combiner and test case
  2323. /// select-cmp-br.ll
  2324. /// checks for it.
  2325. bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
  2326. const ICmpInst *Icmp,
  2327. const unsigned SIOpd) {
  2328. assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
  2329. if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
  2330. BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
  2331. // The check for the unique predecessor is not the best that can be
  2332. // done. But it protects efficiently against cases like when SI's
  2333. // home block has two successors, Succ and Succ1, and Succ1 predecessor
  2334. // of Succ. Then SI can't be replaced by SIOpd because the use that gets
  2335. // replaced can be reached on either path. So the uniqueness check
  2336. // guarantees that the path all uses of SI (outside SI's parent) are on
  2337. // is disjoint from all other paths out of SI. But that information
  2338. // is more expensive to compute, and the trade-off here is in favor
  2339. // of compile-time.
  2340. if (Succ->getUniquePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
  2341. NumSel++;
  2342. SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
  2343. return true;
  2344. }
  2345. }
  2346. return false;
  2347. }
  2348. Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
  2349. bool Changed = false;
  2350. Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  2351. unsigned Op0Cplxity = getComplexity(Op0);
  2352. unsigned Op1Cplxity = getComplexity(Op1);
  2353. /// Orders the operands of the compare so that they are listed from most
  2354. /// complex to least complex. This puts constants before unary operators,
  2355. /// before binary operators.
  2356. if (Op0Cplxity < Op1Cplxity ||
  2357. (Op0Cplxity == Op1Cplxity &&
  2358. swapMayExposeCSEOpportunities(Op0, Op1))) {
  2359. I.swapOperands();
  2360. std::swap(Op0, Op1);
  2361. Changed = true;
  2362. }
  2363. if (Value *V =
  2364. SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC, &I))
  2365. return ReplaceInstUsesWith(I, V);
  2366. // comparing -val or val with non-zero is the same as just comparing val
  2367. // ie, abs(val) != 0 -> val != 0
  2368. if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero()))
  2369. {
  2370. Value *Cond, *SelectTrue, *SelectFalse;
  2371. if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
  2372. m_Value(SelectFalse)))) {
  2373. if (Value *V = dyn_castNegVal(SelectTrue)) {
  2374. if (V == SelectFalse)
  2375. return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
  2376. }
  2377. else if (Value *V = dyn_castNegVal(SelectFalse)) {
  2378. if (V == SelectTrue)
  2379. return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
  2380. }
  2381. }
  2382. }
  2383. Type *Ty = Op0->getType();
  2384. // icmp's with boolean values can always be turned into bitwise operations
  2385. if (Ty->isIntegerTy(1)) {
  2386. switch (I.getPredicate()) {
  2387. default: llvm_unreachable("Invalid icmp instruction!");
  2388. case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
  2389. Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
  2390. return BinaryOperator::CreateNot(Xor);
  2391. }
  2392. case ICmpInst::ICMP_NE: // icmp eq i1 A, B -> A^B
  2393. return BinaryOperator::CreateXor(Op0, Op1);
  2394. case ICmpInst::ICMP_UGT:
  2395. std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
  2396. // FALL THROUGH
  2397. case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
  2398. Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
  2399. return BinaryOperator::CreateAnd(Not, Op1);
  2400. }
  2401. case ICmpInst::ICMP_SGT:
  2402. std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
  2403. // FALL THROUGH
  2404. case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
  2405. Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
  2406. return BinaryOperator::CreateAnd(Not, Op0);
  2407. }
  2408. case ICmpInst::ICMP_UGE:
  2409. std::swap(Op0, Op1); // Change icmp uge -> icmp ule
  2410. // FALL THROUGH
  2411. case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
  2412. Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
  2413. return BinaryOperator::CreateOr(Not, Op1);
  2414. }
  2415. case ICmpInst::ICMP_SGE:
  2416. std::swap(Op0, Op1); // Change icmp sge -> icmp sle
  2417. // FALL THROUGH
  2418. case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
  2419. Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
  2420. return BinaryOperator::CreateOr(Not, Op0);
  2421. }
  2422. }
  2423. }
  2424. unsigned BitWidth = 0;
  2425. if (Ty->isIntOrIntVectorTy())
  2426. BitWidth = Ty->getScalarSizeInBits();
  2427. else // Get pointer size.
  2428. BitWidth = DL.getTypeSizeInBits(Ty->getScalarType());
  2429. bool isSignBit = false;
  2430. // See if we are doing a comparison with a constant.
  2431. if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
  2432. Value *A = nullptr, *B = nullptr;
  2433. // Match the following pattern, which is a common idiom when writing
  2434. // overflow-safe integer arithmetic function. The source performs an
  2435. // addition in wider type, and explicitly checks for overflow using
  2436. // comparisons against INT_MIN and INT_MAX. Simplify this by using the
  2437. // sadd_with_overflow intrinsic.
  2438. //
  2439. // TODO: This could probably be generalized to handle other overflow-safe
  2440. // operations if we worked out the formulas to compute the appropriate
  2441. // magic constants.
  2442. //
  2443. // sum = a + b
  2444. // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8
  2445. {
  2446. ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
  2447. if (I.getPredicate() == ICmpInst::ICMP_UGT &&
  2448. match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
  2449. if (Instruction *Res = ProcessUGT_ADDCST_ADD(I, A, B, CI2, CI, *this))
  2450. return Res;
  2451. }
  2452. // The following transforms are only 'worth it' if the only user of the
  2453. // subtraction is the icmp.
  2454. if (Op0->hasOneUse()) {
  2455. // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
  2456. if (I.isEquality() && CI->isZero() &&
  2457. match(Op0, m_Sub(m_Value(A), m_Value(B))))
  2458. return new ICmpInst(I.getPredicate(), A, B);
  2459. // (icmp sgt (sub nsw A B), -1) -> (icmp sge A, B)
  2460. if (I.getPredicate() == ICmpInst::ICMP_SGT && CI->isAllOnesValue() &&
  2461. match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
  2462. return new ICmpInst(ICmpInst::ICMP_SGE, A, B);
  2463. // (icmp sgt (sub nsw A B), 0) -> (icmp sgt A, B)
  2464. if (I.getPredicate() == ICmpInst::ICMP_SGT && CI->isZero() &&
  2465. match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
  2466. return new ICmpInst(ICmpInst::ICMP_SGT, A, B);
  2467. // (icmp slt (sub nsw A B), 0) -> (icmp slt A, B)
  2468. if (I.getPredicate() == ICmpInst::ICMP_SLT && CI->isZero() &&
  2469. match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
  2470. return new ICmpInst(ICmpInst::ICMP_SLT, A, B);
  2471. // (icmp slt (sub nsw A B), 1) -> (icmp sle A, B)
  2472. if (I.getPredicate() == ICmpInst::ICMP_SLT && CI->isOne() &&
  2473. match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
  2474. return new ICmpInst(ICmpInst::ICMP_SLE, A, B);
  2475. }
  2476. // If we have an icmp le or icmp ge instruction, turn it into the
  2477. // appropriate icmp lt or icmp gt instruction. This allows us to rely on
  2478. // them being folded in the code below. The SimplifyICmpInst code has
  2479. // already handled the edge cases for us, so we just assert on them.
  2480. switch (I.getPredicate()) {
  2481. default: break;
  2482. case ICmpInst::ICMP_ULE:
  2483. assert(!CI->isMaxValue(false)); // A <=u MAX -> TRUE
  2484. return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
  2485. Builder->getInt(CI->getValue()+1));
  2486. case ICmpInst::ICMP_SLE:
  2487. assert(!CI->isMaxValue(true)); // A <=s MAX -> TRUE
  2488. return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
  2489. Builder->getInt(CI->getValue()+1));
  2490. case ICmpInst::ICMP_UGE:
  2491. assert(!CI->isMinValue(false)); // A >=u MIN -> TRUE
  2492. return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
  2493. Builder->getInt(CI->getValue()-1));
  2494. case ICmpInst::ICMP_SGE:
  2495. assert(!CI->isMinValue(true)); // A >=s MIN -> TRUE
  2496. return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
  2497. Builder->getInt(CI->getValue()-1));
  2498. }
  2499. if (I.isEquality()) {
  2500. ConstantInt *CI2;
  2501. if (match(Op0, m_AShr(m_ConstantInt(CI2), m_Value(A))) ||
  2502. match(Op0, m_LShr(m_ConstantInt(CI2), m_Value(A)))) {
  2503. // (icmp eq/ne (ashr/lshr const2, A), const1)
  2504. if (Instruction *Inst = FoldICmpCstShrCst(I, Op0, A, CI, CI2))
  2505. return Inst;
  2506. }
  2507. if (match(Op0, m_Shl(m_ConstantInt(CI2), m_Value(A)))) {
  2508. // (icmp eq/ne (shl const2, A), const1)
  2509. if (Instruction *Inst = FoldICmpCstShlCst(I, Op0, A, CI, CI2))
  2510. return Inst;
  2511. }
  2512. }
  2513. // If this comparison is a normal comparison, it demands all
  2514. // bits, if it is a sign bit comparison, it only demands the sign bit.
  2515. bool UnusedBit;
  2516. isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
  2517. }
  2518. // See if we can fold the comparison based on range information we can get
  2519. // by checking whether bits are known to be zero or one in the input.
  2520. if (BitWidth != 0) {
  2521. APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
  2522. APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
  2523. if (SimplifyDemandedBits(I.getOperandUse(0),
  2524. DemandedBitsLHSMask(I, BitWidth, isSignBit),
  2525. Op0KnownZero, Op0KnownOne, 0))
  2526. return &I;
  2527. if (SimplifyDemandedBits(I.getOperandUse(1),
  2528. APInt::getAllOnesValue(BitWidth), Op1KnownZero,
  2529. Op1KnownOne, 0))
  2530. return &I;
  2531. // Given the known and unknown bits, compute a range that the LHS could be
  2532. // in. Compute the Min, Max and RHS values based on the known bits. For the
  2533. // EQ and NE we use unsigned values.
  2534. APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
  2535. APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
  2536. if (I.isSigned()) {
  2537. ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
  2538. Op0Min, Op0Max);
  2539. ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
  2540. Op1Min, Op1Max);
  2541. } else {
  2542. ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
  2543. Op0Min, Op0Max);
  2544. ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
  2545. Op1Min, Op1Max);
  2546. }
  2547. // If Min and Max are known to be the same, then SimplifyDemandedBits
  2548. // figured out that the LHS is a constant. Just constant fold this now so
  2549. // that code below can assume that Min != Max.
  2550. if (!isa<Constant>(Op0) && Op0Min == Op0Max)
  2551. return new ICmpInst(I.getPredicate(),
  2552. ConstantInt::get(Op0->getType(), Op0Min), Op1);
  2553. if (!isa<Constant>(Op1) && Op1Min == Op1Max)
  2554. return new ICmpInst(I.getPredicate(), Op0,
  2555. ConstantInt::get(Op1->getType(), Op1Min));
  2556. // Based on the range information we know about the LHS, see if we can
  2557. // simplify this comparison. For example, (x&4) < 8 is always true.
  2558. switch (I.getPredicate()) {
  2559. default: llvm_unreachable("Unknown icmp opcode!");
  2560. case ICmpInst::ICMP_EQ: {
  2561. if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
  2562. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2563. // If all bits are known zero except for one, then we know at most one
  2564. // bit is set. If the comparison is against zero, then this is a check
  2565. // to see if *that* bit is set.
  2566. APInt Op0KnownZeroInverted = ~Op0KnownZero;
  2567. if (~Op1KnownZero == 0) {
  2568. // If the LHS is an AND with the same constant, look through it.
  2569. Value *LHS = nullptr;
  2570. ConstantInt *LHSC = nullptr;
  2571. if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
  2572. LHSC->getValue() != Op0KnownZeroInverted)
  2573. LHS = Op0;
  2574. // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
  2575. // then turn "((1 << x)&8) == 0" into "x != 3".
  2576. // or turn "((1 << x)&7) == 0" into "x > 2".
  2577. Value *X = nullptr;
  2578. if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
  2579. APInt ValToCheck = Op0KnownZeroInverted;
  2580. if (ValToCheck.isPowerOf2()) {
  2581. unsigned CmpVal = ValToCheck.countTrailingZeros();
  2582. return new ICmpInst(ICmpInst::ICMP_NE, X,
  2583. ConstantInt::get(X->getType(), CmpVal));
  2584. } else if ((++ValToCheck).isPowerOf2()) {
  2585. unsigned CmpVal = ValToCheck.countTrailingZeros() - 1;
  2586. return new ICmpInst(ICmpInst::ICMP_UGT, X,
  2587. ConstantInt::get(X->getType(), CmpVal));
  2588. }
  2589. }
  2590. // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
  2591. // then turn "((8 >>u x)&1) == 0" into "x != 3".
  2592. const APInt *CI;
  2593. if (Op0KnownZeroInverted == 1 &&
  2594. match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
  2595. return new ICmpInst(ICmpInst::ICMP_NE, X,
  2596. ConstantInt::get(X->getType(),
  2597. CI->countTrailingZeros()));
  2598. }
  2599. break;
  2600. }
  2601. case ICmpInst::ICMP_NE: {
  2602. if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
  2603. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2604. // If all bits are known zero except for one, then we know at most one
  2605. // bit is set. If the comparison is against zero, then this is a check
  2606. // to see if *that* bit is set.
  2607. APInt Op0KnownZeroInverted = ~Op0KnownZero;
  2608. if (~Op1KnownZero == 0) {
  2609. // If the LHS is an AND with the same constant, look through it.
  2610. Value *LHS = nullptr;
  2611. ConstantInt *LHSC = nullptr;
  2612. if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
  2613. LHSC->getValue() != Op0KnownZeroInverted)
  2614. LHS = Op0;
  2615. // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
  2616. // then turn "((1 << x)&8) != 0" into "x == 3".
  2617. // or turn "((1 << x)&7) != 0" into "x < 3".
  2618. Value *X = nullptr;
  2619. if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
  2620. APInt ValToCheck = Op0KnownZeroInverted;
  2621. if (ValToCheck.isPowerOf2()) {
  2622. unsigned CmpVal = ValToCheck.countTrailingZeros();
  2623. return new ICmpInst(ICmpInst::ICMP_EQ, X,
  2624. ConstantInt::get(X->getType(), CmpVal));
  2625. } else if ((++ValToCheck).isPowerOf2()) {
  2626. unsigned CmpVal = ValToCheck.countTrailingZeros();
  2627. return new ICmpInst(ICmpInst::ICMP_ULT, X,
  2628. ConstantInt::get(X->getType(), CmpVal));
  2629. }
  2630. }
  2631. // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
  2632. // then turn "((8 >>u x)&1) != 0" into "x == 3".
  2633. const APInt *CI;
  2634. if (Op0KnownZeroInverted == 1 &&
  2635. match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
  2636. return new ICmpInst(ICmpInst::ICMP_EQ, X,
  2637. ConstantInt::get(X->getType(),
  2638. CI->countTrailingZeros()));
  2639. }
  2640. break;
  2641. }
  2642. case ICmpInst::ICMP_ULT:
  2643. if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
  2644. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2645. if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
  2646. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2647. if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
  2648. return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
  2649. if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
  2650. if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
  2651. return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
  2652. Builder->getInt(CI->getValue()-1));
  2653. // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
  2654. if (CI->isMinValue(true))
  2655. return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
  2656. Constant::getAllOnesValue(Op0->getType()));
  2657. }
  2658. break;
  2659. case ICmpInst::ICMP_UGT:
  2660. if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
  2661. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2662. if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= max(B)
  2663. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2664. if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
  2665. return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
  2666. if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
  2667. if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(a)-1 == C
  2668. return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
  2669. Builder->getInt(CI->getValue()+1));
  2670. // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
  2671. if (CI->isMaxValue(true))
  2672. return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
  2673. Constant::getNullValue(Op0->getType()));
  2674. }
  2675. break;
  2676. case ICmpInst::ICMP_SLT:
  2677. if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(C)
  2678. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2679. if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(C)
  2680. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2681. if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
  2682. return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
  2683. if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
  2684. if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
  2685. return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
  2686. Builder->getInt(CI->getValue()-1));
  2687. }
  2688. break;
  2689. case ICmpInst::ICMP_SGT:
  2690. if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
  2691. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2692. if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
  2693. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2694. if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
  2695. return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
  2696. if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
  2697. if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
  2698. return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
  2699. Builder->getInt(CI->getValue()+1));
  2700. }
  2701. break;
  2702. case ICmpInst::ICMP_SGE:
  2703. assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
  2704. if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
  2705. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2706. if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
  2707. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2708. break;
  2709. case ICmpInst::ICMP_SLE:
  2710. assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
  2711. if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
  2712. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2713. if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
  2714. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2715. break;
  2716. case ICmpInst::ICMP_UGE:
  2717. assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
  2718. if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
  2719. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2720. if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
  2721. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2722. break;
  2723. case ICmpInst::ICMP_ULE:
  2724. assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
  2725. if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
  2726. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  2727. if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
  2728. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  2729. break;
  2730. }
  2731. // Turn a signed comparison into an unsigned one if both operands
  2732. // are known to have the same sign.
  2733. if (I.isSigned() &&
  2734. ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
  2735. (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
  2736. return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
  2737. }
  2738. // Test if the ICmpInst instruction is used exclusively by a select as
  2739. // part of a minimum or maximum operation. If so, refrain from doing
  2740. // any other folding. This helps out other analyses which understand
  2741. // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  2742. // and CodeGen. And in this case, at least one of the comparison
  2743. // operands has at least one user besides the compare (the select),
  2744. // which would often largely negate the benefit of folding anyway.
  2745. if (I.hasOneUse())
  2746. if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
  2747. if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
  2748. (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
  2749. return nullptr;
  2750. // See if we are doing a comparison between a constant and an instruction that
  2751. // can be folded into the comparison.
  2752. if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
  2753. // Since the RHS is a ConstantInt (CI), if the left hand side is an
  2754. // instruction, see if that instruction also has constants so that the
  2755. // instruction can be folded into the icmp
  2756. if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
  2757. if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
  2758. return Res;
  2759. }
  2760. // Handle icmp with constant (but not simple integer constant) RHS
  2761. if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
  2762. if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
  2763. switch (LHSI->getOpcode()) {
  2764. case Instruction::GetElementPtr:
  2765. // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
  2766. if (RHSC->isNullValue() &&
  2767. cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
  2768. return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
  2769. Constant::getNullValue(LHSI->getOperand(0)->getType()));
  2770. break;
  2771. case Instruction::PHI:
  2772. // Only fold icmp into the PHI if the phi and icmp are in the same
  2773. // block. If in the same block, we're encouraging jump threading. If
  2774. // not, we are just pessimizing the code by making an i1 phi.
  2775. if (LHSI->getParent() == I.getParent())
  2776. if (Instruction *NV = FoldOpIntoPhi(I))
  2777. return NV;
  2778. break;
  2779. case Instruction::Select: {
  2780. // If either operand of the select is a constant, we can fold the
  2781. // comparison into the select arms, which will cause one to be
  2782. // constant folded and the select turned into a bitwise or.
  2783. Value *Op1 = nullptr, *Op2 = nullptr;
  2784. ConstantInt *CI = 0;
  2785. if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
  2786. Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
  2787. CI = dyn_cast<ConstantInt>(Op1);
  2788. }
  2789. if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
  2790. Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
  2791. CI = dyn_cast<ConstantInt>(Op2);
  2792. }
  2793. // We only want to perform this transformation if it will not lead to
  2794. // additional code. This is true if either both sides of the select
  2795. // fold to a constant (in which case the icmp is replaced with a select
  2796. // which will usually simplify) or this is the only user of the
  2797. // select (in which case we are trading a select+icmp for a simpler
  2798. // select+icmp) or all uses of the select can be replaced based on
  2799. // dominance information ("Global cases").
  2800. bool Transform = false;
  2801. if (Op1 && Op2)
  2802. Transform = true;
  2803. else if (Op1 || Op2) {
  2804. // Local case
  2805. if (LHSI->hasOneUse())
  2806. Transform = true;
  2807. // Global cases
  2808. else if (CI && !CI->isZero())
  2809. // When Op1 is constant try replacing select with second operand.
  2810. // Otherwise Op2 is constant and try replacing select with first
  2811. // operand.
  2812. Transform = replacedSelectWithOperand(cast<SelectInst>(LHSI), &I,
  2813. Op1 ? 2 : 1);
  2814. }
  2815. if (Transform) {
  2816. if (!Op1)
  2817. Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
  2818. RHSC, I.getName());
  2819. if (!Op2)
  2820. Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
  2821. RHSC, I.getName());
  2822. return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
  2823. }
  2824. break;
  2825. }
  2826. case Instruction::IntToPtr:
  2827. // icmp pred inttoptr(X), null -> icmp pred X, 0
  2828. if (RHSC->isNullValue() &&
  2829. DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
  2830. return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
  2831. Constant::getNullValue(LHSI->getOperand(0)->getType()));
  2832. break;
  2833. case Instruction::Load:
  2834. // Try to optimize things like "A[i] > 4" to index computations.
  2835. if (GetElementPtrInst *GEP =
  2836. dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
  2837. if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
  2838. if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
  2839. !cast<LoadInst>(LHSI)->isVolatile())
  2840. if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
  2841. return Res;
  2842. }
  2843. break;
  2844. }
  2845. }
  2846. // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
  2847. if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
  2848. if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
  2849. return NI;
  2850. if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
  2851. if (Instruction *NI = FoldGEPICmp(GEP, Op0,
  2852. ICmpInst::getSwappedPredicate(I.getPredicate()), I))
  2853. return NI;
  2854. // Test to see if the operands of the icmp are casted versions of other
  2855. // values. If the ptr->ptr cast can be stripped off both arguments, we do so
  2856. // now.
  2857. if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
  2858. if (Op0->getType()->isPointerTy() &&
  2859. (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
  2860. // We keep moving the cast from the left operand over to the right
  2861. // operand, where it can often be eliminated completely.
  2862. Op0 = CI->getOperand(0);
  2863. // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
  2864. // so eliminate it as well.
  2865. if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
  2866. Op1 = CI2->getOperand(0);
  2867. // If Op1 is a constant, we can fold the cast into the constant.
  2868. if (Op0->getType() != Op1->getType()) {
  2869. if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
  2870. Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
  2871. } else {
  2872. // Otherwise, cast the RHS right before the icmp
  2873. Op1 = Builder->CreateBitCast(Op1, Op0->getType());
  2874. }
  2875. }
  2876. return new ICmpInst(I.getPredicate(), Op0, Op1);
  2877. }
  2878. }
  2879. if (isa<CastInst>(Op0)) {
  2880. // Handle the special case of: icmp (cast bool to X), <cst>
  2881. // This comes up when you have code like
  2882. // int X = A < B;
  2883. // if (X) ...
  2884. // For generality, we handle any zero-extension of any operand comparison
  2885. // with a constant or another cast from the same type.
  2886. if (isa<Constant>(Op1) || isa<CastInst>(Op1))
  2887. if (Instruction *R = visitICmpInstWithCastAndCast(I))
  2888. return R;
  2889. }
  2890. // Special logic for binary operators.
  2891. BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
  2892. BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
  2893. if (BO0 || BO1) {
  2894. CmpInst::Predicate Pred = I.getPredicate();
  2895. bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
  2896. if (BO0 && isa<OverflowingBinaryOperator>(BO0))
  2897. NoOp0WrapProblem = ICmpInst::isEquality(Pred) ||
  2898. (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
  2899. (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
  2900. if (BO1 && isa<OverflowingBinaryOperator>(BO1))
  2901. NoOp1WrapProblem = ICmpInst::isEquality(Pred) ||
  2902. (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
  2903. (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
  2904. // Analyze the case when either Op0 or Op1 is an add instruction.
  2905. // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
  2906. Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
  2907. if (BO0 && BO0->getOpcode() == Instruction::Add)
  2908. A = BO0->getOperand(0), B = BO0->getOperand(1);
  2909. if (BO1 && BO1->getOpcode() == Instruction::Add)
  2910. C = BO1->getOperand(0), D = BO1->getOperand(1);
  2911. // icmp (X+cst) < 0 --> X < -cst
  2912. if (NoOp0WrapProblem && ICmpInst::isSigned(Pred) && match(Op1, m_Zero()))
  2913. if (ConstantInt *RHSC = dyn_cast_or_null<ConstantInt>(B))
  2914. if (!RHSC->isMinValue(/*isSigned=*/true))
  2915. return new ICmpInst(Pred, A, ConstantExpr::getNeg(RHSC));
  2916. // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
  2917. if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
  2918. return new ICmpInst(Pred, A == Op1 ? B : A,
  2919. Constant::getNullValue(Op1->getType()));
  2920. // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
  2921. if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
  2922. return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
  2923. C == Op0 ? D : C);
  2924. // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
  2925. if (A && C && (A == C || A == D || B == C || B == D) &&
  2926. NoOp0WrapProblem && NoOp1WrapProblem &&
  2927. // Try not to increase register pressure.
  2928. BO0->hasOneUse() && BO1->hasOneUse()) {
  2929. // Determine Y and Z in the form icmp (X+Y), (X+Z).
  2930. Value *Y, *Z;
  2931. if (A == C) {
  2932. // C + B == C + D -> B == D
  2933. Y = B;
  2934. Z = D;
  2935. } else if (A == D) {
  2936. // D + B == C + D -> B == C
  2937. Y = B;
  2938. Z = C;
  2939. } else if (B == C) {
  2940. // A + C == C + D -> A == D
  2941. Y = A;
  2942. Z = D;
  2943. } else {
  2944. assert(B == D);
  2945. // A + D == C + D -> A == C
  2946. Y = A;
  2947. Z = C;
  2948. }
  2949. return new ICmpInst(Pred, Y, Z);
  2950. }
  2951. // icmp slt (X + -1), Y -> icmp sle X, Y
  2952. if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
  2953. match(B, m_AllOnes()))
  2954. return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
  2955. // icmp sge (X + -1), Y -> icmp sgt X, Y
  2956. if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
  2957. match(B, m_AllOnes()))
  2958. return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
  2959. // icmp sle (X + 1), Y -> icmp slt X, Y
  2960. if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE &&
  2961. match(B, m_One()))
  2962. return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
  2963. // icmp sgt (X + 1), Y -> icmp sge X, Y
  2964. if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT &&
  2965. match(B, m_One()))
  2966. return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
  2967. // if C1 has greater magnitude than C2:
  2968. // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
  2969. // s.t. C3 = C1 - C2
  2970. //
  2971. // if C2 has greater magnitude than C1:
  2972. // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
  2973. // s.t. C3 = C2 - C1
  2974. if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
  2975. (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
  2976. if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
  2977. if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
  2978. const APInt &AP1 = C1->getValue();
  2979. const APInt &AP2 = C2->getValue();
  2980. if (AP1.isNegative() == AP2.isNegative()) {
  2981. APInt AP1Abs = C1->getValue().abs();
  2982. APInt AP2Abs = C2->getValue().abs();
  2983. if (AP1Abs.uge(AP2Abs)) {
  2984. ConstantInt *C3 = Builder->getInt(AP1 - AP2);
  2985. Value *NewAdd = Builder->CreateNSWAdd(A, C3);
  2986. return new ICmpInst(Pred, NewAdd, C);
  2987. } else {
  2988. ConstantInt *C3 = Builder->getInt(AP2 - AP1);
  2989. Value *NewAdd = Builder->CreateNSWAdd(C, C3);
  2990. return new ICmpInst(Pred, A, NewAdd);
  2991. }
  2992. }
  2993. }
  2994. // Analyze the case when either Op0 or Op1 is a sub instruction.
  2995. // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
  2996. A = nullptr; B = nullptr; C = nullptr; D = nullptr;
  2997. if (BO0 && BO0->getOpcode() == Instruction::Sub)
  2998. A = BO0->getOperand(0), B = BO0->getOperand(1);
  2999. if (BO1 && BO1->getOpcode() == Instruction::Sub)
  3000. C = BO1->getOperand(0), D = BO1->getOperand(1);
  3001. // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
  3002. if (A == Op1 && NoOp0WrapProblem)
  3003. return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
  3004. // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
  3005. if (C == Op0 && NoOp1WrapProblem)
  3006. return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
  3007. // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
  3008. if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
  3009. // Try not to increase register pressure.
  3010. BO0->hasOneUse() && BO1->hasOneUse())
  3011. return new ICmpInst(Pred, A, C);
  3012. // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
  3013. if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
  3014. // Try not to increase register pressure.
  3015. BO0->hasOneUse() && BO1->hasOneUse())
  3016. return new ICmpInst(Pred, D, B);
  3017. // icmp (0-X) < cst --> x > -cst
  3018. if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
  3019. Value *X;
  3020. if (match(BO0, m_Neg(m_Value(X))))
  3021. if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
  3022. if (!RHSC->isMinValue(/*isSigned=*/true))
  3023. return new ICmpInst(I.getSwappedPredicate(), X,
  3024. ConstantExpr::getNeg(RHSC));
  3025. }
  3026. BinaryOperator *SRem = nullptr;
  3027. // icmp (srem X, Y), Y
  3028. if (BO0 && BO0->getOpcode() == Instruction::SRem &&
  3029. Op1 == BO0->getOperand(1))
  3030. SRem = BO0;
  3031. // icmp Y, (srem X, Y)
  3032. else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
  3033. Op0 == BO1->getOperand(1))
  3034. SRem = BO1;
  3035. if (SRem) {
  3036. // We don't check hasOneUse to avoid increasing register pressure because
  3037. // the value we use is the same value this instruction was already using.
  3038. switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
  3039. default: break;
  3040. case ICmpInst::ICMP_EQ:
  3041. return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
  3042. case ICmpInst::ICMP_NE:
  3043. return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
  3044. case ICmpInst::ICMP_SGT:
  3045. case ICmpInst::ICMP_SGE:
  3046. return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
  3047. Constant::getAllOnesValue(SRem->getType()));
  3048. case ICmpInst::ICMP_SLT:
  3049. case ICmpInst::ICMP_SLE:
  3050. return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
  3051. Constant::getNullValue(SRem->getType()));
  3052. }
  3053. }
  3054. if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
  3055. BO0->hasOneUse() && BO1->hasOneUse() &&
  3056. BO0->getOperand(1) == BO1->getOperand(1)) {
  3057. switch (BO0->getOpcode()) {
  3058. default: break;
  3059. case Instruction::Add:
  3060. case Instruction::Sub:
  3061. case Instruction::Xor:
  3062. if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
  3063. return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
  3064. BO1->getOperand(0));
  3065. // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
  3066. if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
  3067. if (CI->getValue().isSignBit()) {
  3068. ICmpInst::Predicate Pred = I.isSigned()
  3069. ? I.getUnsignedPredicate()
  3070. : I.getSignedPredicate();
  3071. return new ICmpInst(Pred, BO0->getOperand(0),
  3072. BO1->getOperand(0));
  3073. }
  3074. if (CI->isMaxValue(true)) {
  3075. ICmpInst::Predicate Pred = I.isSigned()
  3076. ? I.getUnsignedPredicate()
  3077. : I.getSignedPredicate();
  3078. Pred = I.getSwappedPredicate(Pred);
  3079. return new ICmpInst(Pred, BO0->getOperand(0),
  3080. BO1->getOperand(0));
  3081. }
  3082. }
  3083. break;
  3084. case Instruction::Mul:
  3085. if (!I.isEquality())
  3086. break;
  3087. if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
  3088. // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
  3089. // Mask = -1 >> count-trailing-zeros(Cst).
  3090. if (!CI->isZero() && !CI->isOne()) {
  3091. const APInt &AP = CI->getValue();
  3092. ConstantInt *Mask = ConstantInt::get(I.getContext(),
  3093. APInt::getLowBitsSet(AP.getBitWidth(),
  3094. AP.getBitWidth() -
  3095. AP.countTrailingZeros()));
  3096. Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask);
  3097. Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask);
  3098. return new ICmpInst(I.getPredicate(), And1, And2);
  3099. }
  3100. }
  3101. break;
  3102. case Instruction::UDiv:
  3103. case Instruction::LShr:
  3104. if (I.isSigned())
  3105. break;
  3106. // fall-through
  3107. case Instruction::SDiv:
  3108. case Instruction::AShr:
  3109. if (!BO0->isExact() || !BO1->isExact())
  3110. break;
  3111. return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
  3112. BO1->getOperand(0));
  3113. case Instruction::Shl: {
  3114. bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
  3115. bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
  3116. if (!NUW && !NSW)
  3117. break;
  3118. if (!NSW && I.isSigned())
  3119. break;
  3120. return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
  3121. BO1->getOperand(0));
  3122. }
  3123. }
  3124. }
  3125. }
  3126. { Value *A, *B;
  3127. // Transform (A & ~B) == 0 --> (A & B) != 0
  3128. // and (A & ~B) != 0 --> (A & B) == 0
  3129. // if A is a power of 2.
  3130. if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
  3131. match(Op1, m_Zero()) &&
  3132. isKnownToBeAPowerOfTwo(A, DL, false, 0, AC, &I, DT) && I.isEquality())
  3133. return new ICmpInst(I.getInversePredicate(),
  3134. Builder->CreateAnd(A, B),
  3135. Op1);
  3136. // ~x < ~y --> y < x
  3137. // ~x < cst --> ~cst < x
  3138. if (match(Op0, m_Not(m_Value(A)))) {
  3139. if (match(Op1, m_Not(m_Value(B))))
  3140. return new ICmpInst(I.getPredicate(), B, A);
  3141. if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
  3142. return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A);
  3143. }
  3144. Instruction *AddI = nullptr;
  3145. if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
  3146. m_Instruction(AddI))) &&
  3147. isa<IntegerType>(A->getType())) {
  3148. Value *Result;
  3149. Constant *Overflow;
  3150. if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
  3151. Overflow)) {
  3152. ReplaceInstUsesWith(*AddI, Result);
  3153. return ReplaceInstUsesWith(I, Overflow);
  3154. }
  3155. }
  3156. // (zext a) * (zext b) --> llvm.umul.with.overflow.
  3157. if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
  3158. if (Instruction *R = ProcessUMulZExtIdiom(I, Op0, Op1, *this))
  3159. return R;
  3160. }
  3161. if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
  3162. if (Instruction *R = ProcessUMulZExtIdiom(I, Op1, Op0, *this))
  3163. return R;
  3164. }
  3165. }
  3166. if (I.isEquality()) {
  3167. Value *A, *B, *C, *D;
  3168. if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
  3169. if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
  3170. Value *OtherVal = A == Op1 ? B : A;
  3171. return new ICmpInst(I.getPredicate(), OtherVal,
  3172. Constant::getNullValue(A->getType()));
  3173. }
  3174. if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
  3175. // A^c1 == C^c2 --> A == C^(c1^c2)
  3176. ConstantInt *C1, *C2;
  3177. if (match(B, m_ConstantInt(C1)) &&
  3178. match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
  3179. Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue());
  3180. Value *Xor = Builder->CreateXor(C, NC);
  3181. return new ICmpInst(I.getPredicate(), A, Xor);
  3182. }
  3183. // A^B == A^D -> B == D
  3184. if (A == C) return new ICmpInst(I.getPredicate(), B, D);
  3185. if (A == D) return new ICmpInst(I.getPredicate(), B, C);
  3186. if (B == C) return new ICmpInst(I.getPredicate(), A, D);
  3187. if (B == D) return new ICmpInst(I.getPredicate(), A, C);
  3188. }
  3189. }
  3190. if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
  3191. (A == Op0 || B == Op0)) {
  3192. // A == (A^B) -> B == 0
  3193. Value *OtherVal = A == Op0 ? B : A;
  3194. return new ICmpInst(I.getPredicate(), OtherVal,
  3195. Constant::getNullValue(A->getType()));
  3196. }
  3197. // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
  3198. if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
  3199. match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
  3200. Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  3201. if (A == C) {
  3202. X = B; Y = D; Z = A;
  3203. } else if (A == D) {
  3204. X = B; Y = C; Z = A;
  3205. } else if (B == C) {
  3206. X = A; Y = D; Z = B;
  3207. } else if (B == D) {
  3208. X = A; Y = C; Z = B;
  3209. }
  3210. if (X) { // Build (X^Y) & Z
  3211. Op1 = Builder->CreateXor(X, Y);
  3212. Op1 = Builder->CreateAnd(Op1, Z);
  3213. I.setOperand(0, Op1);
  3214. I.setOperand(1, Constant::getNullValue(Op1->getType()));
  3215. return &I;
  3216. }
  3217. }
  3218. // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
  3219. // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
  3220. ConstantInt *Cst1;
  3221. if ((Op0->hasOneUse() &&
  3222. match(Op0, m_ZExt(m_Value(A))) &&
  3223. match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
  3224. (Op1->hasOneUse() &&
  3225. match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
  3226. match(Op1, m_ZExt(m_Value(A))))) {
  3227. APInt Pow2 = Cst1->getValue() + 1;
  3228. if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
  3229. Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
  3230. return new ICmpInst(I.getPredicate(), A,
  3231. Builder->CreateTrunc(B, A->getType()));
  3232. }
  3233. // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
  3234. // For lshr and ashr pairs.
  3235. if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
  3236. match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
  3237. (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
  3238. match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
  3239. unsigned TypeBits = Cst1->getBitWidth();
  3240. unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
  3241. if (ShAmt < TypeBits && ShAmt != 0) {
  3242. ICmpInst::Predicate Pred = I.getPredicate() == ICmpInst::ICMP_NE
  3243. ? ICmpInst::ICMP_UGE
  3244. : ICmpInst::ICMP_ULT;
  3245. Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
  3246. APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
  3247. return new ICmpInst(Pred, Xor, Builder->getInt(CmpVal));
  3248. }
  3249. }
  3250. // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
  3251. if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
  3252. match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
  3253. unsigned TypeBits = Cst1->getBitWidth();
  3254. unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
  3255. if (ShAmt < TypeBits && ShAmt != 0) {
  3256. Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
  3257. APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
  3258. Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal),
  3259. I.getName() + ".mask");
  3260. return new ICmpInst(I.getPredicate(), And,
  3261. Constant::getNullValue(Cst1->getType()));
  3262. }
  3263. }
  3264. // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
  3265. // "icmp (and X, mask), cst"
  3266. uint64_t ShAmt = 0;
  3267. if (Op0->hasOneUse() &&
  3268. match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A),
  3269. m_ConstantInt(ShAmt))))) &&
  3270. match(Op1, m_ConstantInt(Cst1)) &&
  3271. // Only do this when A has multiple uses. This is most important to do
  3272. // when it exposes other optimizations.
  3273. !A->hasOneUse()) {
  3274. unsigned ASize =cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
  3275. if (ShAmt < ASize) {
  3276. APInt MaskV =
  3277. APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
  3278. MaskV <<= ShAmt;
  3279. APInt CmpV = Cst1->getValue().zext(ASize);
  3280. CmpV <<= ShAmt;
  3281. Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV));
  3282. return new ICmpInst(I.getPredicate(), Mask, Builder->getInt(CmpV));
  3283. }
  3284. }
  3285. }
  3286. // The 'cmpxchg' instruction returns an aggregate containing the old value and
  3287. // an i1 which indicates whether or not we successfully did the swap.
  3288. //
  3289. // Replace comparisons between the old value and the expected value with the
  3290. // indicator that 'cmpxchg' returns.
  3291. //
  3292. // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
  3293. // spuriously fail. In those cases, the old value may equal the expected
  3294. // value but it is possible for the swap to not occur.
  3295. if (I.getPredicate() == ICmpInst::ICMP_EQ)
  3296. if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
  3297. if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
  3298. if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
  3299. !ACXI->isWeak())
  3300. return ExtractValueInst::Create(ACXI, 1);
  3301. {
  3302. Value *X; ConstantInt *Cst;
  3303. // icmp X+Cst, X
  3304. if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
  3305. return FoldICmpAddOpCst(I, X, Cst, I.getPredicate());
  3306. // icmp X, X+Cst
  3307. if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
  3308. return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate());
  3309. }
  3310. return Changed ? &I : nullptr;
  3311. }
/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
///
/// Attempts to rewrite a floating-point comparison whose LHS is an
/// integer-to-FP conversion (sitofp/uitofp) and whose RHS is an FP constant
/// into an equivalent *integer* comparison of the conversion's source
/// operand. This is only done when the conversion provably loses no
/// information (the integer fits in the FP mantissa, or the RHS is +0.0).
///
/// \param I     The fcmp instruction being visited.
/// \param LHSI  The sitofp/uitofp instruction feeding operand 0 of \p I.
/// \param RHSC  The constant RHS of \p I; must be a ConstantFP to fold.
/// \returns A replacement instruction, or nullptr if no fold applies.
Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
                                                Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC)) return nullptr;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();

  // Get the width of the mantissa.  We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float"
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1) return nullptr;  // Unknown (e.g. no simple mantissa).

  IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  // Check to see that the input is converted from an integer type that is small
  // enough that preserves all bits.  TODO: check here for "known" sign bits.
  // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
  unsigned InputSize = IntTy->getScalarSizeInBits();

  // If this is a uitofp instruction, we need an extra bit to hold the sign.
  bool LHSUnsigned = isa<UIToFPInst>(LHSI);
  if (LHSUnsigned)
    ++InputSize;

  if (I.isEquality()) {
    FCmpInst::Predicate P = I.getPredicate();
    bool IsExact = false;
    APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
    RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);

    // If the floating point constant isn't an integer value, we know if we will
    // ever compare equal / not equal to it.
    if (!IsExact) {
      // TODO: Can never be -0.0 and other non-representable values
      APFloat RHSRoundInt(RHS);
      RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
      if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
        // The RHS is fractional: an integer can never compare equal to it.
        if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
          return ReplaceInstUsesWith(I, Builder->getFalse());

        assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
        return ReplaceInstUsesWith(I, Builder->getTrue());
      }
    }

    // TODO: If the constant is exactly representable, is it always OK to do
    // equality compares as integer?
  }

  // Comparisons with zero are a special case where we know we won't lose
  // information.
  bool IsCmpZero = RHS.isPosZero();

  // If the conversion would lose info, don't hack on this.
  if ((int)InputSize > MantissaWidth && !IsCmpZero)
    return nullptr;

  // Otherwise, we can potentially simplify the comparison.  We know that it
  // will always come through as an integer value and we know the constant is
  // not a NAN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");

  // Map the FP predicate onto the integer predicate.  Ordered and unordered
  // variants collapse to the same integer predicate here: the RHS is not NaN
  // (asserted above) and the LHS comes from an int conversion, so the operands
  // can never be unordered.
  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default: llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    // Neither side can be NaN, so "ordered" is trivially true.
    return ReplaceInstUsesWith(I, Builder->getTrue());
  case FCmpInst::FCMP_UNO:
    return ReplaceInstUsesWith(I, Builder->getFalse());
  }

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer.  For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) {  // smax < 13123.0
      // Every LHS value is below the RHS; only "less than"-style predicates
      // (and NE) can be true.
      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return ReplaceInstUsesWith(I, Builder->getTrue());
      return ReplaceInstUsesWith(I, Builder->getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) {  // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return ReplaceInstUsesWith(I, Builder->getTrue());
      return ReplaceInstUsesWith(I, Builder->getFalse());
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      // Every LHS value is above the RHS; only "greater than"-style
      // predicates (and NE) can be true.
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return ReplaceInstUsesWith(I, Builder->getTrue());
      return ReplaceInstUsesWith(I, Builder->getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return ReplaceInstUsesWith(I, Builder->getTrue());
      return ReplaceInstUsesWith(I, Builder->getFalse());
    }
  }

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional.  See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value.  RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
        return ReplaceInstUsesWith(I, Builder->getTrue());
      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
        return ReplaceInstUsesWith(I, Builder->getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> false
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, Builder->getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4   --> false
        // (float)int < 4.4    --> int <= 4
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, Builder->getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4   --> int < -4
        // (float)int < 4.4    --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> true
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, Builder->getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4   --> true
        // (float)int >= 4.4    --> int > 4
        if (RHS.isNegative())
          return ReplaceInstUsesWith(I, Builder->getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4   --> int >= -4
        // (float)int >= 4.4    --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}
/// visitFCmpInst - Simplify and canonicalize a floating-point comparison.
///
/// Ordering of the transforms below matters: operands are first canonicalized
/// by complexity, then InstSimplify is given a chance, then structural folds
/// (X vs X, min/max idioms, constant-RHS folds, fneg/fpext folds) are tried.
///
/// \returns The (possibly new) replacement instruction, \p &I if it was
/// modified in place, or nullptr if no change was made.
Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex.  This puts constants before unary operators,
  /// before binary operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
                                  I.getFastMathFlags(), DL, TLI, DT, AC, &I))
    return ReplaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'.  With identical operands the result depends
  // only on whether X is NaN, so these all reduce to uno/ord checks.
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;

  // Handle fcmp with constant RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        // Map the fpext's source type to its APFloat semantics so we can try
        // narrowing the constant.
        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf;
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle;
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble;
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad;
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended;
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble;
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals. Zero is a special case
        // that's OK to convert.
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) || Fabs.isZero()))
          return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
        break;
      }
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block.  If in the same block, we're encouraging jump threading.  If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = FoldOpIntoPhi(I))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        // fcmp of a load from a constant global array may be computable from
        // the initializer.
        if (GetElementPtrInst *GEP =
            dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        const Function *F = CI->getCalledFunction();
        if (!F)
          break;

        // Various optimization for fabs compared with zero.
        LibFunc::Func Func;
        if (F->getIntrinsicID() == Intrinsic::fabs ||
            (TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
             (Func == LibFunc::fabs || Func == LibFunc::fabsf ||
              Func == LibFunc::fabsl))) {
          switch (I.getPredicate()) {
          default:
            break;
          // fabs(x) < 0 --> false
          case FCmpInst::FCMP_OLT:
            return ReplaceInstUsesWith(I, Builder->getFalse());
          // fabs(x) > 0 --> x != 0
          case FCmpInst::FCMP_OGT:
            return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
          // fabs(x) <= 0 --> x == 0
          case FCmpInst::FCMP_OLE:
            return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
          // fabs(x) >= 0 --> !isnan(x)
          case FCmpInst::FCMP_OGE:
            return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
          // fabs(x) == 0 --> x == 0
          // fabs(x) != 0 --> x != 0
          case FCmpInst::FCMP_OEQ:
          case FCmpInst::FCMP_UEQ:
          case FCmpInst::FCMP_ONE:
          case FCmpInst::FCMP_UNE:
            return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), RHSC);
          }
        }
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                            RHSExt->getOperand(0));

  return Changed ? &I : nullptr;
}