
//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
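// For illustration (a sketch, not taken from any particular test case): given
// a simple counted loop such as
//
//   for (i = 0; i != n; ++i)
//     sum = sum + 3;
//
// the induction variable i is represented by the add recurrence {0,+,1}<%loop>
// (start 0, stepping by 1 each iteration), and sum by {%sum.start,+,3}<%loop>.
// Because every expression is uniqued, asking for i's SCEV twice returns the
// same object, which is what makes the pointer comparisons mentioned above
// legal.
//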
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
// Chains of recurrences -- a method to expedite the evaluation
// of closed-form functions
// Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
// On computational properties of chains of recurrences
// Eugene V. Zima
//
// Symbolic Evaluation of Chains of Recurrences for Loop Optimization
// Robert A. van Engelen
//
// Efficient Symbolic Analysis for Optimizing Compilers
// Robert A. van Engelen
//
// Using the chains of recurrences algebra for data dependence testing and
// induction variable substitution
// MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/DxilValueCache.h" // HLSL Change
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

#if 0 // HLSL Change Starts - option pending
static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
#else
static const unsigned MaxBruteForceIterations = 100;
static const bool VerifySCEV = false;
#endif // HLSL Change Ends

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DxilValueCache) // HLSL Change
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }
    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }
    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
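
// For reference, a few of the textual forms the printer above produces
// (illustrative values only):
//
//   {0,+,4}<nuw><%for.body>   - an add recurrence carrying a no-unsigned-wrap flag
//   (zext i32 %len to i64)    - a zero-extended SCEVUnknown
//   (%a umax %b)              - an unsigned max expression
//   ***COULDNOTCOMPUTE***     - the SCEVCouldNotCompute sentinel
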
Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}
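
// Note that all three getConstant overloads funnel into the FoldingSet lookup
// above, so two calls with the same value (for example, getConstant(Ty, 42)
// made twice) hand back the same SCEVConstant object rather than two
// structurally equal copies.
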
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
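
// The three recognizers above undo the canonical constant expressions that
// sizeof, alignof, and offsetof are lowered to. Roughly (illustrative only;
// the exact textual form of these constant expressions varies across IR
// versions):
//
//   sizeof(T)       ptrtoint (gep T, T* null, i32 1)
//   alignof(T)      ptrtoint (gep {i1, T}, {i1, T}* null, i32 0, i32 1)
//   offsetof(S, f)  ptrtoint (gep S, S* null, i32 0, i32 f)
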
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
  const LoopInfo *const LI;

public:
  explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

  // Return true if LHS is less than RHS, false otherwise (LHS is at least RHS).
  431. bool operator()(const SCEV *LHS, const SCEV *RHS) const {
  432. return compare(LHS, RHS) < 0;
  433. }
  434. // Return negative, zero, or positive, if LHS is less than, equal to, or
  435. // greater than RHS, respectively. A three-way result allows recursive
  436. // comparisons to be more efficient.
  437. int compare(const SCEV *LHS, const SCEV *RHS) const {
  438. // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  439. if (LHS == RHS)
  440. return 0;
  441. // Primarily, sort the SCEVs by their getSCEVType().
  442. unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  443. if (LType != RType)
  444. return (int)LType - (int)RType;
  445. // Aside from the getSCEVType() ordering, the particular ordering
  446. // isn't very important except that it's beneficial to be consistent,
  447. // so that (a + b) and (b + a) don't end up as different expressions.
  448. switch (static_cast<SCEVTypes>(LType)) {
  449. case scUnknown: {
  450. const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
  451. const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
  452. // Sort SCEVUnknown values with some loose heuristics. TODO: This is
  453. // not as complete as it could be.
  454. const Value *LV = LU->getValue(), *RV = RU->getValue();
  455. // Order pointer values after integer values. This helps SCEVExpander
  456. // form GEPs.
  457. bool LIsPointer = LV->getType()->isPointerTy(),
  458. RIsPointer = RV->getType()->isPointerTy();
  459. if (LIsPointer != RIsPointer)
  460. return (int)LIsPointer - (int)RIsPointer;
  461. // Compare getValueID values.
  462. unsigned LID = LV->getValueID(),
  463. RID = RV->getValueID();
  464. if (LID != RID)
  465. return (int)LID - (int)RID;
  466. // Sort arguments by their position.
  467. if (const Argument *LA = dyn_cast<Argument>(LV)) {
  468. const Argument *RA = cast<Argument>(RV);
  469. unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
  470. return (int)LArgNo - (int)RArgNo;
  471. }
  472. // For instructions, compare their loop depth, and their operand
  473. // count. This is pretty loose.
  474. if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
  475. const Instruction *RInst = cast<Instruction>(RV);
  476. // Compare loop depths.
  477. const BasicBlock *LParent = LInst->getParent(),
  478. *RParent = RInst->getParent();
  479. if (LParent != RParent) {
  480. unsigned LDepth = LI->getLoopDepth(LParent),
  481. RDepth = LI->getLoopDepth(RParent);
  482. if (LDepth != RDepth)
  483. return (int)LDepth - (int)RDepth;
  484. }
  485. // Compare the number of operands.
  486. unsigned LNumOps = LInst->getNumOperands(),
  487. RNumOps = RInst->getNumOperands();
  488. return (int)LNumOps - (int)RNumOps;
  489. }
  490. return 0;
  491. }
  492. case scConstant: {
  493. const SCEVConstant *LC = cast<SCEVConstant>(LHS);
  494. const SCEVConstant *RC = cast<SCEVConstant>(RHS);
  495. // Compare constant values.
  496. const APInt &LA = LC->getValue()->getValue();
  497. const APInt &RA = RC->getValue()->getValue();
  498. unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
  499. if (LBitWidth != RBitWidth)
  500. return (int)LBitWidth - (int)RBitWidth;
  501. return LA.ult(RA) ? -1 : 1;
  502. }
  503. case scAddRecExpr: {
  504. const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
  505. const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
  506. // Compare addrec loop depths.
  507. const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
  508. if (LLoop != RLoop) {
  509. unsigned LDepth = LLoop->getLoopDepth(),
  510. RDepth = RLoop->getLoopDepth();
  511. if (LDepth != RDepth)
  512. return (int)LDepth - (int)RDepth;
  513. }
  514. // Addrec complexity grows with operand count.
  515. unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
  516. if (LNumOps != RNumOps)
  517. return (int)LNumOps - (int)RNumOps;
  518. // Lexicographically compare.
  519. for (unsigned i = 0; i != LNumOps; ++i) {
  520. long X = compare(LA->getOperand(i), RA->getOperand(i));
  521. if (X != 0)
  522. return X;
  523. }
  524. return 0;
  525. }
  526. case scAddExpr:
  527. case scMulExpr:
  528. case scSMaxExpr:
  529. case scUMaxExpr: {
  530. const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
  531. const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
  532. // Lexicographically compare n-ary expressions.
  533. unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
  534. if (LNumOps != RNumOps)
  535. return (int)LNumOps - (int)RNumOps;
  536. for (unsigned i = 0; i != LNumOps; ++i) {
  537. if (i >= RNumOps)
  538. return 1;
  539. long X = compare(LC->getOperand(i), RC->getOperand(i));
  540. if (X != 0)
  541. return X;
  542. }
  543. return (int)LNumOps - (int)RNumOps;
  544. }
  545. case scUDivExpr: {
  546. const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
  547. const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
  548. // Lexicographically compare udiv expressions.
  549. long X = compare(LC->getLHS(), RC->getLHS());
  550. if (X != 0)
  551. return X;
  552. return compare(LC->getRHS(), RC->getRHS());
  553. }
  554. case scTruncate:
  555. case scZeroExtend:
  556. case scSignExtend: {
  557. const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
  558. const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
  559. // Compare cast expressions by operand.
  560. return compare(LC->getOperand(), RC->getOperand());
  561. }
  562. case scCouldNotCompute:
  563. llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  564. }
  565. llvm_unreachable("Unknown SCEV kind!");
  566. }
  567. };
  568. }
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
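/// For example, given the operands (%a, 7, %a), the result is (7, %a, %a):
/// the constant sorts first (constants have the lowest complexity), and the
/// two identical SCEVUnknowns end up adjacent so callers can fold them in a
/// single scan.
///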
  579. static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
  580. LoopInfo *LI) {
  581. if (Ops.size() < 2) return; // Noop
  582. if (Ops.size() == 2) {
  583. // This is the common case, which also happens to be trivially simple.
  584. // Special case it.
  585. const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
  586. if (SCEVComplexityCompare(LI)(RHS, LHS))
  587. std::swap(LHS, RHS);
  588. return;
  589. }
  590. // Do the rough sort by complexity.
  591. std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
  592. // Now that we are sorted by complexity, group elements of the same
  593. // complexity. Note that this is, at worst, N^2, but the vector is likely to
  594. // be extremely short in practice. Note that we take this approach because we
  595. // do not want to depend on the addresses of the objects we are grouping.
  596. for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
  597. const SCEV *S = Ops[i];
  598. unsigned Complexity = S->getSCEVType();
  599. // If there are any objects of the same complexity and same value as this
  600. // one, group them.
  601. for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
  602. if (Ops[j] == S) { // Found a duplicate.
  603. // Move it to immediately after i'th element.
  604. std::swap(Ops[i+1], Ops[j]);
  605. ++i; // no need to rescan it.
  606. if (i == e-2) return; // Done!
  607. }
  608. }
  609. }
  610. }
  611. namespace {
  612. struct FindSCEVSize {
  613. int Size;
  614. FindSCEVSize() : Size(0) {}
  615. bool follow(const SCEV *S) {
  616. ++Size;
  617. // Keep looking at all operands of S.
  618. return true;
  619. }
  620. bool isDone() const {
  621. return false;
  622. }
  623. };
  624. }
  625. // Returns the size of the SCEV S.
  626. static inline int sizeOfSCEV(const SCEV *S) {
  627. FindSCEVSize F;
  628. SCEVTraversal<FindSCEVSize> ST(F);
  629. ST.visitAll(S);
  630. return F.Size;
  631. }
  632. namespace {
  633. struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
  634. public:
  635. // Computes the Quotient and Remainder of the division of Numerator by
  636. // Denominator.
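  // For example, dividing the SCEV (4 * %a * %b) by %a yields
  // Quotient = (4 * %b) and Remainder = 0, while dividing (3 + %a) by the
  // constant 2 yields Quotient = 1 and Remainder = (1 + %a), since that
  // division does not simplify further.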
  637. static void divide(ScalarEvolution &SE, const SCEV *Numerator,
  638. const SCEV *Denominator, const SCEV **Quotient,
  639. const SCEV **Remainder) {
  640. assert(Numerator && Denominator && "Uninitialized SCEV");
  641. SCEVDivision D(SE, Numerator, Denominator);
  642. // Check for the trivial case here to avoid having to check for it in the
  643. // rest of the code.
  644. if (Numerator == Denominator) {
  645. *Quotient = D.One;
  646. *Remainder = D.Zero;
  647. return;
  648. }
  649. if (Numerator->isZero()) {
  650. *Quotient = D.Zero;
  651. *Remainder = D.Zero;
  652. return;
  653. }
// A simple case, N/1: the quotient is N.
  655. if (Denominator->isOne()) {
  656. *Quotient = Numerator;
  657. *Remainder = D.Zero;
  658. return;
  659. }
  660. // Split the Denominator when it is a product.
  661. if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
  662. const SCEV *Q, *R;
  663. *Quotient = Numerator;
  664. for (const SCEV *Op : T->operands()) {
  665. divide(SE, *Quotient, Op, &Q, &R);
  666. *Quotient = Q;
  667. // Bail out when the Numerator is not divisible by one of the terms of
  668. // the Denominator.
  669. if (!R->isZero()) {
  670. *Quotient = D.Zero;
  671. *Remainder = Numerator;
  672. return;
  673. }
  674. }
  675. *Remainder = D.Zero;
  676. return;
  677. }
  678. D.visit(Numerator);
  679. *Quotient = D.Quotient;
  680. *Remainder = D.Remainder;
  681. }
// Except in the trivial case described above, we do not know how to divide
// Expr by Denominator for the following expression kinds, so their visitors
// are intentionally left empty.
  684. void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  685. void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  686. void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  687. void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  688. void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  689. void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  690. void visitUnknown(const SCEVUnknown *Numerator) {}
  691. void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}
  692. void visitConstant(const SCEVConstant *Numerator) {
  693. if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
  694. APInt NumeratorVal = Numerator->getValue()->getValue();
  695. APInt DenominatorVal = D->getValue()->getValue();
  696. uint32_t NumeratorBW = NumeratorVal.getBitWidth();
  697. uint32_t DenominatorBW = DenominatorVal.getBitWidth();
  698. if (NumeratorBW > DenominatorBW)
  699. DenominatorVal = DenominatorVal.sext(NumeratorBW);
  700. else if (NumeratorBW < DenominatorBW)
  701. NumeratorVal = NumeratorVal.sext(DenominatorBW);
  702. APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
  703. APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
  704. APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
  705. Quotient = SE.getConstant(QuotientVal);
  706. Remainder = SE.getConstant(RemainderVal);
  707. return;
  708. }
  709. }
  710. void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
  711. const SCEV *StartQ, *StartR, *StepQ, *StepR;
  712. assert(Numerator->isAffine() && "Numerator should be affine");
  713. divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
  714. divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
  715. // Bail out if the types do not match.
  716. Type *Ty = Denominator->getType();
  717. if (Ty != StartQ->getType() || Ty != StartR->getType() ||
  718. Ty != StepQ->getType() || Ty != StepR->getType()) {
  719. Quotient = Zero;
  720. Remainder = Numerator;
  721. return;
  722. }
  723. Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
  724. Numerator->getNoWrapFlags());
  725. Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
  726. Numerator->getNoWrapFlags());
  727. }
  728. void visitAddExpr(const SCEVAddExpr *Numerator) {
  729. SmallVector<const SCEV *, 2> Qs, Rs;
  730. Type *Ty = Denominator->getType();
  731. for (const SCEV *Op : Numerator->operands()) {
  732. const SCEV *Q, *R;
  733. divide(SE, Op, Denominator, &Q, &R);
  734. // Bail out if types do not match.
  735. if (Ty != Q->getType() || Ty != R->getType()) {
  736. Quotient = Zero;
  737. Remainder = Numerator;
  738. return;
  739. }
  740. Qs.push_back(Q);
  741. Rs.push_back(R);
  742. }
  743. if (Qs.size() == 1) {
  744. Quotient = Qs[0];
  745. Remainder = Rs[0];
  746. return;
  747. }
  748. Quotient = SE.getAddExpr(Qs);
  749. Remainder = SE.getAddExpr(Rs);
  750. }
  751. void visitMulExpr(const SCEVMulExpr *Numerator) {
  752. SmallVector<const SCEV *, 2> Qs;
  753. Type *Ty = Denominator->getType();
  754. bool FoundDenominatorTerm = false;
  755. for (const SCEV *Op : Numerator->operands()) {
  756. // Bail out if types do not match.
  757. if (Ty != Op->getType()) {
  758. Quotient = Zero;
  759. Remainder = Numerator;
  760. return;
  761. }
  762. if (FoundDenominatorTerm) {
  763. Qs.push_back(Op);
  764. continue;
  765. }
  766. // Check whether Denominator divides one of the product operands.
  767. const SCEV *Q, *R;
  768. divide(SE, Op, Denominator, &Q, &R);
  769. if (!R->isZero()) {
  770. Qs.push_back(Op);
  771. continue;
  772. }
  773. // Bail out if types do not match.
  774. if (Ty != Q->getType()) {
  775. Quotient = Zero;
  776. Remainder = Numerator;
  777. return;
  778. }
  779. FoundDenominatorTerm = true;
  780. Qs.push_back(Q);
  781. }
  782. if (FoundDenominatorTerm) {
  783. Remainder = Zero;
  784. if (Qs.size() == 1)
  785. Quotient = Qs[0];
  786. else
  787. Quotient = SE.getMulExpr(Qs);
  788. return;
  789. }
  790. if (!isa<SCEVUnknown>(Denominator)) {
  791. Quotient = Zero;
  792. Remainder = Numerator;
  793. return;
  794. }
  795. // The Remainder is obtained by replacing Denominator by 0 in Numerator.
  796. ValueToValueMap RewriteMap;
  797. RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
  798. cast<SCEVConstant>(Zero)->getValue();
  799. Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
  800. if (Remainder->isZero()) {
  801. // The Quotient is obtained by replacing Denominator by 1 in Numerator.
  802. RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
  803. cast<SCEVConstant>(One)->getValue();
  804. Quotient =
  805. SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
  806. return;
  807. }
  808. // Quotient is (Numerator - Remainder) divided by Denominator.
  809. const SCEV *Q, *R;
  810. const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
  811. if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) {
  812. // This SCEV does not seem to simplify: fail the division here.
  813. Quotient = Zero;
  814. Remainder = Numerator;
  815. return;
  816. }
  817. divide(SE, Diff, Denominator, &Q, &R);
    assert(R == Zero &&
           "Denominator should evenly divide (Numerator - Remainder)");
  820. Quotient = Q;
  821. }
  822. private:
  823. SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
  824. const SCEV *Denominator)
  825. : SE(S), Denominator(Denominator) {
  826. Zero = SE.getConstant(Denominator->getType(), 0);
  827. One = SE.getConstant(Denominator->getType(), 1);
  828. // By default, we don't know how to divide Expr by Denominator.
  829. // Providing the default here simplifies the rest of the code.
  830. Quotient = Zero;
  831. Remainder = Numerator;
  832. }
  833. ScalarEvolution &SE;
  834. const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
  835. };
  836. }
  837. //===----------------------------------------------------------------------===//
  838. // Simple SCEV method implementations
  839. //===----------------------------------------------------------------------===//
/// BinomialCoefficient - Compute BC(It, K). The result has width W.
/// Assumes K > 0.
  842. static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
  843. ScalarEvolution &SE,
  844. Type *ResultTy) {
  845. // Handle the simplest case efficiently.
  846. if (K == 1)
  847. return SE.getTruncateOrZeroExtend(It, ResultTy);
  848. // We are using the following formula for BC(It, K):
  849. //
  850. // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  851. //
// Suppose W is the bitwidth of the return value. We must be prepared for
  853. // overflow. Hence, we must assure that the result of our computation is
  854. // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  855. // safe in modular arithmetic.
  856. //
  857. // However, this code doesn't use exactly that formula; the formula it uses
  858. // is something like the following, where T is the number of factors of 2 in
  859. // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  860. // exponentiation:
  861. //
  862. // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  863. //
  864. // This formula is trivially equivalent to the previous formula. However,
  865. // this formula can be implemented much more efficiently. The trick is that
  866. // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  867. // arithmetic. To do exact division in modular arithmetic, all we have
  868. // to do is multiply by the inverse. Therefore, this step can be done at
  869. // width W.
  870. //
  871. // The next issue is how to safely do the division by 2^T. The way this
  872. // is done is by doing the multiplication step at a width of at least W + T
  873. // bits. This way, the bottom W+T bits of the product are accurate. Then,
  874. // when we perform the division by 2^T (which is equivalent to a right shift
  875. // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  876. // truncated out after the division by 2^T.
  877. //
// In comparison to just directly using the first formula, this technique
// is much more efficient; using the first formula requires W * K bits,
// but this formula needs less than W + K bits. Also, the first formula
// requires a division step, whereas this formula only requires multiplies
// and shifts.
  882. //
  883. // It doesn't matter whether the subtraction step is done in the calculation
  884. // width or the input iteration count's width; if the subtraction overflows,
  885. // the result must be zero anyway. We prefer here to do it in the width of
  886. // the induction variable because it helps a lot for certain cases; CodeGen
  887. // isn't smart enough to ignore the overflow, which leads to much less
  888. // efficient code if the width of the subtraction is wider than the native
  889. // register width.
  890. //
  891. // (It's possible to not widen at all by pulling out factors of 2 before
  892. // the multiplication; for example, K=2 can be calculated as
  893. // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  894. // extra arithmetic, so it's not an obvious win, and it gets
  895. // much more complicated for K > 3.)
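//
// As a concrete illustration, for K = 3 and W = 32 the code below computes
// T = 1 and OddFactorial = 3 (since 3! = 6 = 2^1 * 3). It then forms
// It*(It-1)*(It-2) at W + T = 33 bits, divides by 2^T = 2, truncates back
// to 32 bits, and multiplies by the multiplicative inverse of 3 modulo 2^32
// to perform the remaining exact division.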
  896. // Protection from insane SCEVs; this bound is conservative,
  897. // but it probably doesn't matter.
  898. if (K > 1000)
  899. return SE.getCouldNotCompute();
  900. unsigned W = SE.getTypeSizeInBits(ResultTy);
  901. // Calculate K! / 2^T and T; we divide out the factors of two before
  902. // multiplying for calculating K! / 2^T to avoid overflow.
  903. // Other overflow doesn't matter because we only care about the bottom
  904. // W bits of the result.
  905. APInt OddFactorial(W, 1);
  906. unsigned T = 1;
  907. for (unsigned i = 3; i <= K; ++i) {
  908. APInt Mult(W, i);
  909. unsigned TwoFactors = Mult.countTrailingZeros();
  910. T += TwoFactors;
  911. Mult = Mult.lshr(TwoFactors);
  912. OddFactorial *= Mult;
  913. }
  914. // We need at least W + T bits for the multiplication step
  915. unsigned CalculationBits = W + T;
  916. // Calculate 2^T, at width T+W.
  917. APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
  918. // Calculate the multiplicative inverse of K! / 2^T;
  919. // this multiplication factor will perform the exact division by
  920. // K! / 2^T.
  921. APInt Mod = APInt::getSignedMinValue(W+1);
  922. APInt MultiplyFactor = OddFactorial.zext(W+1);
  923. MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  924. MultiplyFactor = MultiplyFactor.trunc(W);
  925. // Calculate the product, at width T+W
  926. IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
  927. CalculationBits);
  928. const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  929. for (unsigned i = 1; i != K; ++i) {
  930. const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
  931. Dividend = SE.getMulExpr(Dividend,
  932. SE.getTruncateOrZeroExtend(S, CalculationTy));
  933. }
  934. // Divide by 2^T
  935. const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
  936. // Truncate the result, and divide by K! / 2^T.
  937. return SE.getMulExpr(SE.getConstant(MultiplyFactor),
  938. SE.getTruncateOrZeroExtend(DivResult, ResultTy));
  939. }
  940. /// evaluateAtIteration - Return the value of this chain of recurrences at
  941. /// the specified iteration number. We can evaluate this recurrence by
  942. /// multiplying each element in the chain by the binomial coefficient
  943. /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
  944. ///
  945. /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
  946. ///
  947. /// where BC(It, k) stands for binomial coefficient.
  948. ///
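/// For example, the affine recurrence {A,+,B} evaluates to A + B*It, and the
/// quadratic recurrence {A,+,B,+,C} evaluates to A + B*It + C*(It*(It-1)/2).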
  949. const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
  950. ScalarEvolution &SE) const {
  951. const SCEV *Result = getStart();
  952. for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
  953. // The computation is correct in the face of overflow provided that the
  954. // multiplication is performed _after_ the evaluation of the binomial
  955. // coefficient.
  956. const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
  957. if (isa<SCEVCouldNotCompute>(Coeff))
  958. return Coeff;
  959. Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  960. }
  961. return Result;
  962. }
  963. //===----------------------------------------------------------------------===//
  964. // SCEV Expression folder implementations
  965. //===----------------------------------------------------------------------===//
  966. const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
  967. Type *Ty) {
  968. assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
  969. "This is not a truncating conversion!");
  970. assert(isSCEVable(Ty) &&
  971. "This is not a conversion to a SCEVable type!");
  972. Ty = getEffectiveSCEVType(Ty);
  973. FoldingSetNodeID ID;
  974. ID.AddInteger(scTruncate);
  975. ID.AddPointer(Op);
  976. ID.AddPointer(Ty);
  977. void *IP = nullptr;
  978. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  979. // Fold if the operand is constant.
  980. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  981. return getConstant(
  982. cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
  983. // trunc(trunc(x)) --> trunc(x)
  984. if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
  985. return getTruncateExpr(ST->getOperand(), Ty);
  986. // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  987. if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
  988. return getTruncateOrSignExtend(SS->getOperand(), Ty);
  989. // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  990. if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
  991. return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
  992. // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  993. // eliminate all the truncates, or we replace other casts with truncates.
  994. if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
  995. SmallVector<const SCEV *, 4> Operands;
  996. bool hasTrunc = false;
  997. for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
  998. const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
  999. if (!isa<SCEVCastExpr>(SA->getOperand(i)))
  1000. hasTrunc = isa<SCEVTruncateExpr>(S);
  1001. Operands.push_back(S);
  1002. }
  1003. if (!hasTrunc)
  1004. return getAddExpr(Operands);
  1005. UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
  1006. }
  1007. // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  1008. // eliminate all the truncates, or we replace other casts with truncates.
  1009. if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
  1010. SmallVector<const SCEV *, 4> Operands;
  1011. bool hasTrunc = false;
  1012. for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
  1013. const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
  1014. if (!isa<SCEVCastExpr>(SM->getOperand(i)))
  1015. hasTrunc = isa<SCEVTruncateExpr>(S);
  1016. Operands.push_back(S);
  1017. }
  1018. if (!hasTrunc)
  1019. return getMulExpr(Operands);
  1020. UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
  1021. }
  1022. // If the input value is a chrec scev, truncate the chrec's operands.
  1023. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
  1024. SmallVector<const SCEV *, 4> Operands;
  1025. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
  1026. Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
  1027. return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  1028. }
  1029. // The cast wasn't folded; create an explicit cast node. We can reuse
  1030. // the existing insert position since if we get here, we won't have
  1031. // made any changes which would invalidate it.
  1032. SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
  1033. Op, Ty);
  1034. UniqueSCEVs.InsertNode(S, IP);
  1035. return S;
  1036. }
  1037. // Get the limit of a recurrence such that incrementing by Step cannot cause
  1038. // signed overflow as long as the value of the recurrence within the
  1039. // loop does not exceed this limit before incrementing.
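// For example, for an 8-bit recurrence whose step is known to lie in [1, 3],
// the limit is INT8_MIN - 3 == 125 (mod 2^8) with predicate SLT: any value
// that is signed-less-than 125 can be incremented by at most 3 without
// exceeding INT8_MAX.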
  1040. static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
  1041. ICmpInst::Predicate *Pred,
  1042. ScalarEvolution *SE) {
  1043. unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  1044. if (SE->isKnownPositive(Step)) {
  1045. *Pred = ICmpInst::ICMP_SLT;
  1046. return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
  1047. SE->getSignedRange(Step).getSignedMax());
  1048. }
  1049. if (SE->isKnownNegative(Step)) {
  1050. *Pred = ICmpInst::ICMP_SGT;
  1051. return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
  1052. SE->getSignedRange(Step).getSignedMin());
  1053. }
  1054. return nullptr;
  1055. }
  1056. // Get the limit of a recurrence such that incrementing by Step cannot cause
  1057. // unsigned overflow as long as the value of the recurrence within the loop does
  1058. // not exceed this limit before incrementing.
  1059. static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
  1060. ICmpInst::Predicate *Pred,
  1061. ScalarEvolution *SE) {
  1062. unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  1063. *Pred = ICmpInst::ICMP_ULT;
  1064. return SE->getConstant(APInt::getMinValue(BitWidth) -
  1065. SE->getUnsignedRange(Step).getUnsignedMax());
  1066. }
  1067. namespace {
  1068. struct ExtendOpTraitsBase {
  1069. typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
  1070. };
  1071. // Used to make code generic over signed and unsigned overflow.
  1072. template <typename ExtendOp> struct ExtendOpTraits {
  1073. // Members present:
  1074. //
  1075. // static const SCEV::NoWrapFlags WrapType;
  1076. //
  1077. // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  1078. //
  1079. // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  1080. // ICmpInst::Predicate *Pred,
  1081. // ScalarEvolution *SE);
  1082. };
  1083. template <>
  1084. struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  1085. static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
  1086. static const GetExtendExprTy GetExtendExpr;
  1087. static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  1088. ICmpInst::Predicate *Pred,
  1089. ScalarEvolution *SE) {
  1090. return getSignedOverflowLimitForStep(Step, Pred, SE);
  1091. }
  1092. };
  1093. const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
  1094. SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
  1095. template <>
  1096. struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  1097. static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
  1098. static const GetExtendExprTy GetExtendExpr;
  1099. static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  1100. ICmpInst::Predicate *Pred,
  1101. ScalarEvolution *SE) {
  1102. return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  1103. }
  1104. };
  1105. const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
  1106. SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
  1107. }
// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
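//
// For instance, if Start is (%n + 4), Step is 4, the sibling {%n,+,4} is
// already known to be <nsw>, and the loop's backedge is taken at least once,
// then PreStart is %n and sext(%n + 4) can safely be rewritten as
// (4 + sext(%n)).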
  1115. template <typename ExtendOpTy>
  1116. static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
  1117. ScalarEvolution *SE) {
  1118. auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  1119. auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
  1120. const Loop *L = AR->getLoop();
  1121. const SCEV *Start = AR->getStart();
  1122. const SCEV *Step = AR->getStepRecurrence(*SE);
  1123. // Check for a simple looking step prior to loop entry.
  1124. const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  1125. if (!SA)
  1126. return nullptr;
  1127. // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  1128. // subtraction is expensive. For this purpose, perform a quick and dirty
  1129. // difference, by checking for Step in the operand list.
  1130. SmallVector<const SCEV *, 4> DiffOps;
  1131. for (const SCEV *Op : SA->operands())
  1132. if (Op != Step)
  1133. DiffOps.push_back(Op);
  1134. if (DiffOps.size() == SA->getNumOperands())
  1135. return nullptr;
  1136. // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  1137. // `Step`:
  1138. // 1. NSW/NUW flags on the step increment.
  1139. const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
  1140. const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
  1141. SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
  1142. // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  1143. // "S+X does not sign/unsign-overflow".
  1144. //
  1145. const SCEV *BECount = SE->getBackedgeTakenCount(L);
  1146. if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
  1147. !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
  1148. return PreStart;
  1149. // 2. Direct overflow check on the step operation's expression.
  1150. unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  1151. Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  1152. const SCEV *OperandExtendedStart =
  1153. SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
  1154. (SE->*GetExtendExpr)(Step, WideTy));
  1155. if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
  1156. if (PreAR && AR->getNoWrapFlags(WrapType)) {
  1157. // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
  1158. // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
  1159. // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
  1160. const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
  1161. }
  1162. return PreStart;
  1163. }
  1164. // 3. Loop precondition.
  1165. ICmpInst::Predicate Pred;
  1166. const SCEV *OverflowLimit =
  1167. ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
  1168. if (OverflowLimit &&
  1169. SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
  1170. return PreStart;
  1171. }
  1172. return nullptr;
  1173. }
  1174. // Get the normalized zero or sign extended expression for this AddRec's Start.
  1175. template <typename ExtendOpTy>
  1176. static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
  1177. ScalarEvolution *SE) {
  1178. auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
  1179. const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
  1180. if (!PreStart)
  1181. return (SE->*GetExtendExpr)(AR->getStart(), Ty);
  1182. return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
  1183. (SE->*GetExtendExpr)(PreStart, Ty));
  1184. }
  1185. // Try to prove away overflow by looking at "nearby" add recurrences. A
  1186. // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
  1187. // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
  1188. //
  1189. // Formally:
  1190. //
  1191. // {S,+,X} == {S-T,+,X} + T
  1192. // => Ext({S,+,X}) == Ext({S-T,+,X} + T)
  1193. //
  1194. // If ({S-T,+,X} + T) does not overflow ... (1)
  1195. //
  1196. // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
  1197. //
  1198. // If {S-T,+,X} does not overflow ... (2)
  1199. //
  1200. // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
  1201. // == {Ext(S-T)+Ext(T),+,Ext(X)}
  1202. //
  1203. // If (S-T)+T does not overflow ... (3)
  1204. //
  1205. // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
  1206. // == {Ext(S),+,Ext(X)} == LHS
  1207. //
  1208. // Thus, if (1), (2) and (3) are true for some T, then
  1209. // Ext({S,+,X}) == {Ext(S),+,Ext(X)}
  1210. //
  1211. // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
  1212. // does not overflow" restricted to the 0th iteration. Therefore we only need
  1213. // to check for (1) and (2).
  1214. //
  1215. // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
  1216. // is `Delta` (defined below).
  1217. //
  1218. template <typename ExtendOpTy>
  1219. bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
  1220. const SCEV *Step,
  1221. const Loop *L) {
  1222. auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  1223. // We restrict `Start` to a constant to prevent SCEV from spending too much
  1224. // time here. It is correct (but more expensive) to continue with a
  1225. // non-constant `Start` and do a general SCEV subtraction to compute
  1226. // `PreStart` below.
  1227. //
  1228. const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  1229. if (!StartC)
  1230. return false;
  1231. APInt StartAI = StartC->getValue()->getValue();
  1232. for (unsigned Delta : {-2, -1, 1, 2}) {
  1233. const SCEV *PreStart = getConstant(StartAI - Delta);
  1234. // Give up if we don't already have the add recurrence we need because
  1235. // actually constructing an add recurrence is relatively expensive.
  1236. const SCEVAddRecExpr *PreAR = [&]() {
  1237. FoldingSetNodeID ID;
  1238. ID.AddInteger(scAddRecExpr);
  1239. ID.AddPointer(PreStart);
  1240. ID.AddPointer(Step);
  1241. ID.AddPointer(L);
  1242. void *IP = nullptr;
  1243. return static_cast<SCEVAddRecExpr *>(
  1244. this->UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  1245. }();
  1246. if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
  1247. const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
  1248. ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  1249. const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
  1250. DeltaS, &Pred, this);
  1251. if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
  1252. return true;
  1253. }
  1254. }
  1255. return false;
  1256. }
  1257. const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
  1258. Type *Ty) {
  1259. assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
  1260. "This is not an extending conversion!");
  1261. assert(isSCEVable(Ty) &&
  1262. "This is not a conversion to a SCEVable type!");
  1263. Ty = getEffectiveSCEVType(Ty);
  1264. // Fold if the operand is constant.
  1265. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  1266. return getConstant(
  1267. cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
  1268. // zext(zext(x)) --> zext(x)
  1269. if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
  1270. return getZeroExtendExpr(SZ->getOperand(), Ty);
  1271. // Before doing any expensive analysis, check to see if we've already
  1272. // computed a SCEV for this Op and Ty.
  1273. FoldingSetNodeID ID;
  1274. ID.AddInteger(scZeroExtend);
  1275. ID.AddPointer(Op);
  1276. ID.AddPointer(Ty);
  1277. void *IP = nullptr;
  1278. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  1279. // zext(trunc(x)) --> zext(x) or x or trunc(x)
  1280. if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
  1281. // It's possible the bits taken off by the truncate were all zero bits. If
  1282. // so, we should be able to simplify this further.
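// (For example, if %x is an i32 whose unsigned range is known to be
// [0, 200], then zext(trunc %x to i8) to i64 folds to zext(%x) to i64,
// because the truncate provably discarded only zero bits.)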
  1283. const SCEV *X = ST->getOperand();
  1284. ConstantRange CR = getUnsignedRange(X);
  1285. unsigned TruncBits = getTypeSizeInBits(ST->getType());
  1286. unsigned NewBits = getTypeSizeInBits(Ty);
  1287. if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
  1288. CR.zextOrTrunc(NewBits)))
  1289. return getTruncateOrZeroExtend(X, Ty);
  1290. }
  1291. // If the input value is a chrec scev, and we can prove that the value
  1292. // did not overflow the old, smaller, value, we can zero extend all of the
  1293. // operands (often constants). This allows analysis of something like
  1294. // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  1295. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
  1296. if (AR->isAffine()) {
  1297. const SCEV *Start = AR->getStart();
  1298. const SCEV *Step = AR->getStepRecurrence(*this);
  1299. unsigned BitWidth = getTypeSizeInBits(AR->getType());
  1300. const Loop *L = AR->getLoop();
  1301. // If we have special knowledge that this addrec won't overflow,
  1302. // we don't need to do any further analysis.
  1303. if (AR->getNoWrapFlags(SCEV::FlagNUW))
  1304. return getAddRecExpr(
  1305. getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
  1306. getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
// simply not analyzable, and it covers the case where this code is
// being called from within backedge-taken count analysis, such that
// attempting to ask for the backedge-taken count would likely result
// in infinite recursion. In the latter case, the analysis code will
// cope with a conservative value, and it will take care to purge
// that value once it has finished.
  1315. const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
  1316. if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
  1317. // Manually compute the final value for AR, checking for
  1318. // overflow.
  1319. // Check whether the backedge-taken count can be losslessly casted to
  1320. // the addrec's type. The count is always unsigned.
  1321. const SCEV *CastedMaxBECount =
  1322. getTruncateOrZeroExtend(MaxBECount, Start->getType());
  1323. const SCEV *RecastedMaxBECount =
  1324. getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
  1325. if (MaxBECount == RecastedMaxBECount) {
  1326. Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
  1327. // Check whether Start+Step*MaxBECount has no unsigned overflow.
  1328. const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
  1329. const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
  1330. const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
  1331. const SCEV *WideMaxBECount =
  1332. getZeroExtendExpr(CastedMaxBECount, WideTy);
  1333. const SCEV *OperandExtendedAdd =
  1334. getAddExpr(WideStart,
  1335. getMulExpr(WideMaxBECount,
  1336. getZeroExtendExpr(Step, WideTy)));
  1337. if (ZAdd == OperandExtendedAdd) {
  1338. // Cache knowledge of AR NUW, which is propagated to this AddRec.
  1339. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
  1340. // Return the expression with the addrec on the outside.
  1341. return getAddRecExpr(
  1342. getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
  1343. getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1344. }
  1345. // Similar to above, only this time treat the step value as signed.
  1346. // This covers loops that count down.
  1347. OperandExtendedAdd =
  1348. getAddExpr(WideStart,
  1349. getMulExpr(WideMaxBECount,
  1350. getSignExtendExpr(Step, WideTy)));
  1351. if (ZAdd == OperandExtendedAdd) {
  1352. // Cache knowledge of AR NW, which is propagated to this AddRec.
  1353. // Negative step causes unsigned wrap, but it still can't self-wrap.
  1354. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
  1355. // Return the expression with the addrec on the outside.
  1356. return getAddRecExpr(
  1357. getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
  1358. getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1359. }
  1360. }
  1361. // If the backedge is guarded by a comparison with the pre-inc value
  1362. // the addrec is safe. Also, if the entry is guarded by a comparison
  1363. // with the start value and the backedge is guarded by a comparison
  1364. // with the post-inc value, the addrec is safe.
  1365. if (isKnownPositive(Step)) {
  1366. const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
  1367. getUnsignedRange(Step).getUnsignedMax());
  1368. if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
  1369. (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
  1370. isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
  1371. AR->getPostIncExpr(*this), N))) {
  1372. // Cache knowledge of AR NUW, which is propagated to this AddRec.
  1373. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
  1374. // Return the expression with the addrec on the outside.
  1375. return getAddRecExpr(
  1376. getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
  1377. getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1378. }
  1379. } else if (isKnownNegative(Step)) {
  1380. const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
  1381. getSignedRange(Step).getSignedMin());
  1382. if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
  1383. (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
  1384. isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
  1385. AR->getPostIncExpr(*this), N))) {
  1386. // Cache knowledge of AR NW, which is propagated to this AddRec.
  1387. // Negative step causes unsigned wrap, but it still can't self-wrap.
  1388. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
  1389. // Return the expression with the addrec on the outside.
  1390. return getAddRecExpr(
  1391. getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
  1392. getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1393. }
  1394. }
  1395. }
  1396. if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
  1397. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
  1398. return getAddRecExpr(
  1399. getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
  1400. getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1401. }
  1402. }
  1403. // The cast wasn't folded; create an explicit cast node.
  1404. // Recompute the insert position, as it may have been invalidated.
  1405. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  1406. SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
  1407. Op, Ty);
  1408. UniqueSCEVs.InsertNode(S, IP);
  1409. return S;
  1410. }
  1411. const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
  1412. Type *Ty) {
  1413. assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
  1414. "This is not an extending conversion!");
  1415. assert(isSCEVable(Ty) &&
  1416. "This is not a conversion to a SCEVable type!");
  1417. Ty = getEffectiveSCEVType(Ty);
  1418. // Fold if the operand is constant.
  1419. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  1420. return getConstant(
  1421. cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
  1422. // sext(sext(x)) --> sext(x)
  1423. if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
  1424. return getSignExtendExpr(SS->getOperand(), Ty);
  1425. // sext(zext(x)) --> zext(x)
  1426. if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
  1427. return getZeroExtendExpr(SZ->getOperand(), Ty);
  1428. // Before doing any expensive analysis, check to see if we've already
  1429. // computed a SCEV for this Op and Ty.
  1430. FoldingSetNodeID ID;
  1431. ID.AddInteger(scSignExtend);
  1432. ID.AddPointer(Op);
  1433. ID.AddPointer(Ty);
  1434. void *IP = nullptr;
  1435. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  1436. // If the input value is provably positive, build a zext instead.
  1437. if (isKnownNonNegative(Op))
  1438. return getZeroExtendExpr(Op, Ty);
  1439. // sext(trunc(x)) --> sext(x) or x or trunc(x)
  1440. if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
  1441. // It's possible the bits taken off by the truncate were all sign bits. If
  1442. // so, we should be able to simplify this further.
  1443. const SCEV *X = ST->getOperand();
  1444. ConstantRange CR = getSignedRange(X);
  1445. unsigned TruncBits = getTypeSizeInBits(ST->getType());
  1446. unsigned NewBits = getTypeSizeInBits(Ty);
  1447. if (CR.truncate(TruncBits).signExtend(NewBits).contains(
  1448. CR.sextOrTrunc(NewBits)))
  1449. return getTruncateOrSignExtend(X, Ty);
  1450. }
  1451. // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
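// (For example, sext(1 + (4 * %x)) becomes 1 + sext(4 * %x): with C1 = 1 and
// C2 = 4, C2 is a power of two greater than C1, so adding C1 only fills the
// product's low zero bits and cannot carry into or change its sign bit.)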
  1452. if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
  1453. if (SA->getNumOperands() == 2) {
  1454. auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
  1455. auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
  1456. if (SMul && SC1) {
  1457. if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
  1458. const APInt &C1 = SC1->getValue()->getValue();
  1459. const APInt &C2 = SC2->getValue()->getValue();
  1460. if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
  1461. C2.ugt(C1) && C2.isPowerOf2())
  1462. return getAddExpr(getSignExtendExpr(SC1, Ty),
  1463. getSignExtendExpr(SMul, Ty));
  1464. }
  1465. }
  1466. }
  1467. }
  1468. // If the input value is a chrec scev, and we can prove that the value
  1469. // did not overflow the old, smaller, value, we can sign extend all of the
  1470. // operands (often constants). This allows analysis of something like
  1471. // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  1472. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
  1473. if (AR->isAffine()) {
  1474. const SCEV *Start = AR->getStart();
  1475. const SCEV *Step = AR->getStepRecurrence(*this);
  1476. unsigned BitWidth = getTypeSizeInBits(AR->getType());
  1477. const Loop *L = AR->getLoop();
  1478. // If we have special knowledge that this addrec won't overflow,
  1479. // we don't need to do any further analysis.
  1480. if (AR->getNoWrapFlags(SCEV::FlagNSW))
  1481. return getAddRecExpr(
  1482. getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
  1483. getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
// simply not analyzable, and it covers the case where this code is
// being called from within backedge-taken count analysis, such that
// attempting to ask for the backedge-taken count would likely result
// in infinite recursion. In the latter case, the analysis code will
// cope with a conservative value, and it will take care to purge
// that value once it has finished.
  1492. const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
  1493. if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
  1494. // Manually compute the final value for AR, checking for
  1495. // overflow.
  1496. // Check whether the backedge-taken count can be losslessly casted to
  1497. // the addrec's type. The count is always unsigned.
  1498. const SCEV *CastedMaxBECount =
  1499. getTruncateOrZeroExtend(MaxBECount, Start->getType());
  1500. const SCEV *RecastedMaxBECount =
  1501. getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
  1502. if (MaxBECount == RecastedMaxBECount) {
  1503. Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
  1504. // Check whether Start+Step*MaxBECount has no signed overflow.
  1505. const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
  1506. const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
  1507. const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
  1508. const SCEV *WideMaxBECount =
  1509. getZeroExtendExpr(CastedMaxBECount, WideTy);
  1510. const SCEV *OperandExtendedAdd =
  1511. getAddExpr(WideStart,
  1512. getMulExpr(WideMaxBECount,
  1513. getSignExtendExpr(Step, WideTy)));
  1514. if (SAdd == OperandExtendedAdd) {
  1515. // Cache knowledge of AR NSW, which is propagated to this AddRec.
  1516. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
  1517. // Return the expression with the addrec on the outside.
  1518. return getAddRecExpr(
  1519. getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
  1520. getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1521. }
  1522. // Similar to above, only this time treat the step value as unsigned.
  1523. // This covers loops that count up with an unsigned step.
  1524. OperandExtendedAdd =
  1525. getAddExpr(WideStart,
  1526. getMulExpr(WideMaxBECount,
  1527. getZeroExtendExpr(Step, WideTy)));
  1528. if (SAdd == OperandExtendedAdd) {
  1529. // If AR wraps around then
  1530. //
  1531. // abs(Step) * MaxBECount > unsigned-max(AR->getType())
  1532. // => SAdd != OperandExtendedAdd
  1533. //
  1534. // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
  1535. // (SAdd == OperandExtendedAdd => AR is NW)
  1536. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
  1537. // Return the expression with the addrec on the outside.
  1538. return getAddRecExpr(
  1539. getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
  1540. getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1541. }
  1542. }
  1543. // If the backedge is guarded by a comparison with the pre-inc value
  1544. // the addrec is safe. Also, if the entry is guarded by a comparison
  1545. // with the start value and the backedge is guarded by a comparison
  1546. // with the post-inc value, the addrec is safe.
  1547. ICmpInst::Predicate Pred;
  1548. const SCEV *OverflowLimit =
  1549. getSignedOverflowLimitForStep(Step, &Pred, this);
  1550. if (OverflowLimit &&
  1551. (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
  1552. (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
  1553. isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
  1554. OverflowLimit)))) {
  1555. // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
  1556. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
  1557. return getAddRecExpr(
  1558. getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
  1559. getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1560. }
  1561. }
  1562. // If Start and Step are constants, check if we can apply this
  1563. // transformation:
  1564. // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
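// (For example, sext{1,+,4} becomes 1 + sext{0,+,4}, since 4 is a power of
// two greater than 1.)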
  1565. auto SC1 = dyn_cast<SCEVConstant>(Start);
  1566. auto SC2 = dyn_cast<SCEVConstant>(Step);
  1567. if (SC1 && SC2) {
  1568. const APInt &C1 = SC1->getValue()->getValue();
  1569. const APInt &C2 = SC2->getValue()->getValue();
  1570. if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
  1571. C2.isPowerOf2()) {
  1572. Start = getSignExtendExpr(Start, Ty);
  1573. const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
  1574. L, AR->getNoWrapFlags());
  1575. return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
  1576. }
  1577. }
  1578. if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
  1579. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
  1580. return getAddRecExpr(
  1581. getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
  1582. getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
  1583. }
  1584. }
  1585. // The cast wasn't folded; create an explicit cast node.
  1586. // Recompute the insert position, as it may have been invalidated.
  1587. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  1588. SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
  1589. Op, Ty);
  1590. UniqueSCEVs.InsertNode(S, IP);
  1591. return S;
  1592. }
  1593. /// getAnyExtendExpr - Return a SCEV for the given operand extended with
  1594. /// unspecified bits out to the given type.
  1595. ///
  1596. const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
  1597. Type *Ty) {
  1598. assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
  1599. "This is not an extending conversion!");
  1600. assert(isSCEVable(Ty) &&
  1601. "This is not a conversion to a SCEVable type!");
  1602. Ty = getEffectiveSCEVType(Ty);
  1603. // Sign-extend negative constants.
  1604. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  1605. if (SC->getValue()->getValue().isNegative())
  1606. return getSignExtendExpr(Op, Ty);
  1607. // Peel off a truncate cast.
  1608. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
  1609. const SCEV *NewOp = T->getOperand();
  1610. if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
  1611. return getAnyExtendExpr(NewOp, Ty);
  1612. return getTruncateOrNoop(NewOp, Ty);
  1613. }
  1614. // Next try a zext cast. If the cast is folded, use it.
  1615. const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  1616. if (!isa<SCEVZeroExtendExpr>(ZExt))
  1617. return ZExt;
  1618. // Next try a sext cast. If the cast is folded, use it.
  1619. const SCEV *SExt = getSignExtendExpr(Op, Ty);
  1620. if (!isa<SCEVSignExtendExpr>(SExt))
  1621. return SExt;
  1622. // Force the cast to be folded into the operands of an addrec.
  1623. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
  1624. SmallVector<const SCEV *, 4> Ops;
  1625. for (const SCEV *Op : AR->operands())
  1626. Ops.push_back(getAnyExtendExpr(Op, Ty));
  1627. return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  1628. }
  1629. // If the expression is obviously signed, use the sext cast value.
  1630. if (isa<SCEVSMaxExpr>(Op))
  1631. return SExt;
  1632. // Absent any other information, use the zext cast value.
  1633. return ZExt;
  1634. }
  1635. /// CollectAddOperandsWithScales - Process the given Ops list, which is
  1636. /// a list of operands to be added under the given scale, update the given
  1637. /// map. This is a helper function for getAddRecExpr. As an example of
  1638. /// what it does, given a sequence of operands that would form an add
  1639. /// expression like this:
  1640. ///
  1641. /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
  1642. ///
  1643. /// where A and B are constants, update the map with these values:
  1644. ///
  1645. /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
  1646. ///
  1647. /// and add 13 + A*B*29 to AccumulatedConstant.
  1648. /// This will allow getAddRecExpr to produce this:
  1649. ///
  1650. /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
  1651. ///
  1652. /// This form often exposes folding opportunities that are hidden in
  1653. /// the original operand list.
  1654. ///
  1655. /// Return true iff it appears that any interesting folding opportunities
  1656. /// may be exposed. This helps getAddRecExpr short-circuit extra work in
  1657. /// the common case where no interesting opportunities are present, and
  1658. /// is also used as a check to avoid infinite recursion.
  1659. ///
  1660. static bool
  1661. CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
  1662. SmallVectorImpl<const SCEV *> &NewOps,
  1663. APInt &AccumulatedConstant,
  1664. const SCEV *const *Ops, size_t NumOperands,
  1665. const APInt &Scale,
  1666. ScalarEvolution &SE) {
  1667. bool Interesting = false;
  1668. // Iterate over the add operands. They are sorted, with constants first.
  1669. unsigned i = 0;
  1670. while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
  1671. ++i;
  1672. // Pull a buried constant out to the outside.
  1673. if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
  1674. Interesting = true;
  1675. AccumulatedConstant += Scale * C->getValue()->getValue();
  1676. }
  1677. // Next comes everything else. We're especially interested in multiplies
  1678. // here, but they're in the middle, so just visit the rest with one loop.
  1679. for (; i != NumOperands; ++i) {
  1680. const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
  1681. if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
  1682. APInt NewScale =
  1683. Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
  1684. if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
  1685. // A multiplication of a constant with another add; recurse.
  1686. const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
  1687. Interesting |=
  1688. CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
  1689. Add->op_begin(), Add->getNumOperands(),
  1690. NewScale, SE);
  1691. } else {
  1692. // A multiplication of a constant with some other value. Update
  1693. // the map.
  1694. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
  1695. const SCEV *Key = SE.getMulExpr(MulOps);
  1696. std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
  1697. M.insert(std::make_pair(Key, NewScale));
  1698. if (Pair.second) {
  1699. NewOps.push_back(Pair.first->first);
  1700. } else {
  1701. Pair.first->second += NewScale;
  1702. // The map already had an entry for this value, which may indicate
  1703. // a folding opportunity.
  1704. Interesting = true;
  1705. }
  1706. }
  1707. } else {
  1708. // An ordinary operand. Update the map.
  1709. std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
  1710. M.insert(std::make_pair(Ops[i], Scale));
  1711. if (Pair.second) {
  1712. NewOps.push_back(Pair.first->first);
  1713. } else {
  1714. Pair.first->second += Scale;
  1715. // The map already had an entry for this value, which may indicate
  1716. // a folding opportunity.
  1717. Interesting = true;
  1718. }
  1719. }
  1720. }
  1721. return Interesting;
  1722. }
  1723. namespace {
  1724. struct APIntCompare {
  1725. bool operator()(const APInt &LHS, const APInt &RHS) const {
  1726. return LHS.ult(RHS);
  1727. }
  1728. };
  1729. }
  1730. // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
  1731. // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
  1732. // can't-overflow flags for the operation if possible.
  1733. static SCEV::NoWrapFlags
  1734. StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
  1735. const SmallVectorImpl<const SCEV *> &Ops,
  1736. SCEV::NoWrapFlags OldFlags) {
  1737. using namespace std::placeholders;
  1738. bool CanAnalyze =
  1739. Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  1740. (void)CanAnalyze;
  1741. assert(CanAnalyze && "don't call from other places!");
  1742. int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  1743. SCEV::NoWrapFlags SignOrUnsignWrap =
  1744. ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask);
  1745. // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
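// (If every operand is non-negative, the signed result stays within
// [0, SINT_MAX]; an operation that cannot overflow in the signed sense
// therefore cannot wrap in the unsigned sense either.)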
  1746. auto IsKnownNonNegative =
  1747. std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1);
  1748. if (SignOrUnsignWrap == SCEV::FlagNSW &&
  1749. std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative))
  1750. return ScalarEvolution::setFlags(OldFlags,
  1751. (SCEV::NoWrapFlags)SignOrUnsignMask);
  1752. return OldFlags;
  1753. }
  1754. /// getAddExpr - Get a canonical add expression, or something simpler if
  1755. /// possible.
  1756. const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
  1757. SCEV::NoWrapFlags Flags) {
  1758. assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
  1759. "only nuw or nsw allowed");
  1760. assert(!Ops.empty() && "Cannot get empty add!");
  1761. if (Ops.size() == 1) return Ops[0];
  1762. #ifndef NDEBUG
  1763. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  1764. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  1765. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  1766. "SCEVAddExpr operand types don't match!");
  1767. #endif
  1768. Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
  1769. // Sort by complexity, this groups all similar expression types together.
  1770. GroupByComplexity(Ops, LI);
  1771. // If there are any constants, fold them together.
  1772. unsigned Idx = 0;
  1773. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  1774. ++Idx;
  1775. assert(Idx < Ops.size());
  1776. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  1777. // We found two constants, fold them together!
  1778. Ops[0] = getConstant(LHSC->getValue()->getValue() +
  1779. RHSC->getValue()->getValue());
  1780. if (Ops.size() == 2) return Ops[0];
  1781. Ops.erase(Ops.begin()+1); // Erase the folded element
  1782. LHSC = cast<SCEVConstant>(Ops[0]);
  1783. }
  1784. // If we are left with a constant zero being added, strip it off.
  1785. if (LHSC->getValue()->isZero()) {
  1786. Ops.erase(Ops.begin());
  1787. --Idx;
  1788. }
  1789. if (Ops.size() == 1) return Ops[0];
  1790. }
  1791. // Okay, check to see if the same value occurs in the operand list more than
1792. // once. If so, merge them together into a multiply expression. Since we
  1793. // sorted the list, these values are required to be adjacent.
  1794. Type *Ty = Ops[0]->getType();
  1795. bool FoundMatch = false;
  1796. for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
  1797. if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
  1798. // Scan ahead to count how many equal operands there are.
  1799. unsigned Count = 2;
  1800. while (i+Count != e && Ops[i+Count] == Ops[i])
  1801. ++Count;
  1802. // Merge the values into a multiply.
  1803. const SCEV *Scale = getConstant(Ty, Count);
  1804. const SCEV *Mul = getMulExpr(Scale, Ops[i]);
  1805. if (Ops.size() == Count)
  1806. return Mul;
  1807. Ops[i] = Mul;
  1808. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
  1809. --i; e -= Count - 1;
  1810. FoundMatch = true;
  1811. }
  1812. if (FoundMatch)
  1813. return getAddExpr(Ops, Flags);
  1814. // Check for truncates. If all the operands are truncated from the same
  1815. // type, see if factoring out the truncate would permit the result to be
1816. // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  1817. // if the contents of the resulting outer trunc fold to something simple.
  1818. for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
  1819. const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
  1820. Type *DstType = Trunc->getType();
  1821. Type *SrcType = Trunc->getOperand()->getType();
  1822. SmallVector<const SCEV *, 8> LargeOps;
  1823. bool Ok = true;
  1824. // Check all the operands to see if they can be represented in the
  1825. // source type of the truncate.
  1826. for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
  1827. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
  1828. if (T->getOperand()->getType() != SrcType) {
  1829. Ok = false;
  1830. break;
  1831. }
  1832. LargeOps.push_back(T->getOperand());
  1833. } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
  1834. LargeOps.push_back(getAnyExtendExpr(C, SrcType));
  1835. } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
  1836. SmallVector<const SCEV *, 8> LargeMulOps;
  1837. for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
  1838. if (const SCEVTruncateExpr *T =
  1839. dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
  1840. if (T->getOperand()->getType() != SrcType) {
  1841. Ok = false;
  1842. break;
  1843. }
  1844. LargeMulOps.push_back(T->getOperand());
  1845. } else if (const SCEVConstant *C =
  1846. dyn_cast<SCEVConstant>(M->getOperand(j))) {
  1847. LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
  1848. } else {
  1849. Ok = false;
  1850. break;
  1851. }
  1852. }
  1853. if (Ok)
  1854. LargeOps.push_back(getMulExpr(LargeMulOps));
  1855. } else {
  1856. Ok = false;
  1857. break;
  1858. }
  1859. }
  1860. if (Ok) {
  1861. // Evaluate the expression in the larger type.
  1862. const SCEV *Fold = getAddExpr(LargeOps, Flags);
  1863. // If it folds to something simple, use it. Otherwise, don't.
  1864. if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
  1865. return getTruncateExpr(Fold, DstType);
  1866. }
  1867. }
  1868. // Skip past any other cast SCEVs.
  1869. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
  1870. ++Idx;
  1871. // If there are add operands they would be next.
  1872. if (Idx < Ops.size()) {
  1873. bool DeletedAdd = false;
  1874. while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
  1875. // If we have an add, expand the add operands onto the end of the operands
  1876. // list.
  1877. Ops.erase(Ops.begin()+Idx);
  1878. Ops.append(Add->op_begin(), Add->op_end());
  1879. DeletedAdd = true;
  1880. }
  1881. // If we deleted at least one add, we added operands to the end of the list,
  1882. // and they are not necessarily sorted. Recurse to resort and resimplify
  1883. // any operands we just acquired.
  1884. if (DeletedAdd)
  1885. return getAddExpr(Ops);
  1886. }
  1887. // Skip over the add expression until we get to a multiply.
  1888. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
  1889. ++Idx;
  1890. // Check to see if there are any folding opportunities present with
  1891. // operands multiplied by constant values.
  1892. if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
  1893. uint64_t BitWidth = getTypeSizeInBits(Ty);
  1894. DenseMap<const SCEV *, APInt> M;
  1895. SmallVector<const SCEV *, 8> NewOps;
  1896. APInt AccumulatedConstant(BitWidth, 0);
  1897. if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
  1898. Ops.data(), Ops.size(),
  1899. APInt(BitWidth, 1), *this)) {
1900. // Some interesting folding opportunity is present, so it's worthwhile to
  1901. // re-generate the operands list. Group the operands by constant scale,
  1902. // to avoid multiplying by the same constant scale multiple times.
  1903. std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
  1904. for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
  1905. E = NewOps.end(); I != E; ++I)
  1906. MulOpLists[M.find(*I)->second].push_back(*I);
  1907. // Re-generate the operands list.
  1908. Ops.clear();
  1909. if (AccumulatedConstant != 0)
  1910. Ops.push_back(getConstant(AccumulatedConstant));
  1911. for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
  1912. I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
  1913. if (I->first != 0)
  1914. Ops.push_back(getMulExpr(getConstant(I->first),
  1915. getAddExpr(I->second)));
  1916. if (Ops.empty())
  1917. return getConstant(Ty, 0);
  1918. if (Ops.size() == 1)
  1919. return Ops[0];
  1920. return getAddExpr(Ops);
  1921. }
  1922. }
  1923. // If we are adding something to a multiply expression, make sure the
  1924. // something is not already an operand of the multiply. If so, merge it into
  1925. // the multiply.
  1926. for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
  1927. const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
  1928. for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
  1929. const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
  1930. if (isa<SCEVConstant>(MulOpSCEV))
  1931. continue;
  1932. for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
  1933. if (MulOpSCEV == Ops[AddOp]) {
  1934. // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
  1935. const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
  1936. if (Mul->getNumOperands() != 2) {
  1937. // If the multiply has more than two operands, we must get the
  1938. // Y*Z term.
  1939. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
  1940. Mul->op_begin()+MulOp);
  1941. MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
  1942. InnerMul = getMulExpr(MulOps);
  1943. }
  1944. const SCEV *One = getConstant(Ty, 1);
  1945. const SCEV *AddOne = getAddExpr(One, InnerMul);
  1946. const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
  1947. if (Ops.size() == 2) return OuterMul;
  1948. if (AddOp < Idx) {
  1949. Ops.erase(Ops.begin()+AddOp);
  1950. Ops.erase(Ops.begin()+Idx-1);
  1951. } else {
  1952. Ops.erase(Ops.begin()+Idx);
  1953. Ops.erase(Ops.begin()+AddOp-1);
  1954. }
  1955. Ops.push_back(OuterMul);
  1956. return getAddExpr(Ops);
  1957. }
  1958. // Check this multiply against other multiplies being added together.
  1959. for (unsigned OtherMulIdx = Idx+1;
  1960. OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
  1961. ++OtherMulIdx) {
  1962. const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
  1963. // If MulOp occurs in OtherMul, we can fold the two multiplies
  1964. // together.
  1965. for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
  1966. OMulOp != e; ++OMulOp)
  1967. if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
  1968. // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
  1969. const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
  1970. if (Mul->getNumOperands() != 2) {
  1971. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
  1972. Mul->op_begin()+MulOp);
  1973. MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
  1974. InnerMul1 = getMulExpr(MulOps);
  1975. }
  1976. const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
  1977. if (OtherMul->getNumOperands() != 2) {
  1978. SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
  1979. OtherMul->op_begin()+OMulOp);
  1980. MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
  1981. InnerMul2 = getMulExpr(MulOps);
  1982. }
  1983. const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
  1984. const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
  1985. if (Ops.size() == 2) return OuterMul;
  1986. Ops.erase(Ops.begin()+Idx);
  1987. Ops.erase(Ops.begin()+OtherMulIdx-1);
  1988. Ops.push_back(OuterMul);
  1989. return getAddExpr(Ops);
  1990. }
  1991. }
  1992. }
  1993. }
  1994. // If there are any add recurrences in the operands list, see if any other
  1995. // added values are loop invariant. If so, we can fold them into the
  1996. // recurrence.
  1997. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
  1998. ++Idx;
  1999. // Scan over all recurrences, trying to fold loop invariants into them.
  2000. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
  2001. // Scan all of the other operands to this add and add them to the vector if
  2002. // they are loop invariant w.r.t. the recurrence.
  2003. SmallVector<const SCEV *, 8> LIOps;
  2004. const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
  2005. const Loop *AddRecLoop = AddRec->getLoop();
  2006. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2007. if (isLoopInvariant(Ops[i], AddRecLoop)) {
  2008. LIOps.push_back(Ops[i]);
  2009. Ops.erase(Ops.begin()+i);
  2010. --i; --e;
  2011. }
  2012. // If we found some loop invariants, fold them into the recurrence.
  2013. if (!LIOps.empty()) {
  2014. // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
  2015. LIOps.push_back(AddRec->getStart());
  2016. SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
  2017. AddRec->op_end());
  2018. AddRecOps[0] = getAddExpr(LIOps);
  2019. // Build the new addrec. Propagate the NUW and NSW flags if both the
  2020. // outer add and the inner addrec are guaranteed to have no overflow.
  2021. // Always propagate NW.
  2022. Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
  2023. const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
  2024. // If all of the other operands were loop invariant, we are done.
  2025. if (Ops.size() == 1) return NewRec;
  2026. // Otherwise, add the folded AddRec by the non-invariant parts.
  2027. for (unsigned i = 0;; ++i)
  2028. if (Ops[i] == AddRec) {
  2029. Ops[i] = NewRec;
  2030. break;
  2031. }
  2032. return getAddExpr(Ops);
  2033. }
  2034. // Okay, if there weren't any loop invariants to be folded, check to see if
  2035. // there are multiple AddRec's with the same loop induction variable being
  2036. // added together. If so, we can fold them.
  2037. for (unsigned OtherIdx = Idx+1;
  2038. OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  2039. ++OtherIdx)
  2040. if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
  2041. // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
  2042. SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
  2043. AddRec->op_end());
  2044. for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  2045. ++OtherIdx)
  2046. if (const SCEVAddRecExpr *OtherAddRec =
  2047. dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
  2048. if (OtherAddRec->getLoop() == AddRecLoop) {
  2049. for (unsigned i = 0, e = OtherAddRec->getNumOperands();
  2050. i != e; ++i) {
  2051. if (i >= AddRecOps.size()) {
  2052. AddRecOps.append(OtherAddRec->op_begin()+i,
  2053. OtherAddRec->op_end());
  2054. break;
  2055. }
  2056. AddRecOps[i] = getAddExpr(AddRecOps[i],
  2057. OtherAddRec->getOperand(i));
  2058. }
  2059. Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
  2060. }
  2061. // Step size has changed, so we cannot guarantee no self-wraparound.
  2062. Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
  2063. return getAddExpr(Ops);
  2064. }
  2065. // Otherwise couldn't fold anything into this recurrence. Move onto the
  2066. // next one.
  2067. }
  2068. // Okay, it looks like we really DO need an add expr. Check to see if we
  2069. // already have one, otherwise create a new one.
  2070. FoldingSetNodeID ID;
  2071. ID.AddInteger(scAddExpr);
  2072. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2073. ID.AddPointer(Ops[i]);
  2074. void *IP = nullptr;
  2075. SCEVAddExpr *S =
  2076. static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2077. if (!S) {
  2078. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2079. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2080. S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
  2081. O, Ops.size());
  2082. UniqueSCEVs.InsertNode(S, IP);
  2083. }
  2084. S->setNoWrapFlags(Flags);
  2085. return S;
  2086. }
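// A minimal usage sketch (illustrative, not part of the original file) of the
// folding performed above. 'SE' and an integer-typed SCEV 'X' are assumed to
// come from elsewhere; the function name is hypothetical.
static void addExprFoldingExample(ScalarEvolution &SE, const SCEV *X) {
  Type *Ty = X->getType();
  // Equal operands are merged into a multiply: X + X becomes 2 * X.
  const SCEV *Twice = SE.getAddExpr(X, X);
  // A constant zero operand is stripped: X + 0 simply yields X again.
  const SCEV *Same = SE.getAddExpr(X, SE.getConstant(Ty, 0));
  (void)Twice;
  (void)Same;
}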
  2087. static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  2088. uint64_t k = i*j;
  2089. if (j > 1 && k / j != i) Overflow = true;
  2090. return k;
  2091. }
  2092. /// Compute the result of "n choose k", the binomial coefficient. If an
  2093. /// intermediate computation overflows, Overflow will be set and the return will
  2094. /// be garbage. Overflow is not cleared on absence of overflow.
  2095. static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  2096. // We use the multiplicative formula:
  2097. // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2098. // At each iteration, we take the n-th term of the numerator and divide by the
  2099. // (k-n)th term of the denominator. This division will always produce an
  2100. // integral result, and helps reduce the chance of overflow in the
  2101. // intermediate computations. However, we can still overflow even when the
  2102. // final result would fit.
  2103. if (n == 0 || n == k) return 1;
  2104. if (k > n) return 0;
  2105. if (k > n/2)
  2106. k = n-k;
  2107. uint64_t r = 1;
  2108. for (uint64_t i = 1; i <= k; ++i) {
  2109. r = umul_ov(r, n-(i-1), Overflow);
  2110. r /= i;
  2111. }
  2112. return r;
  2113. }
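// Illustrative sketch (not part of the original file) of how Choose and its
// sticky Overflow flag behave; the wrapper name is hypothetical.
static void chooseExample() {
  bool Overflow = false;
  // 6 choose 2 via the loop above: r = 1*6/1 = 6, then r = 6*5/2 = 15.
  uint64_t R = Choose(6, 2, Overflow);
  assert(!Overflow && R == 15 && "small binomial coefficients do not overflow");
  (void)R;
  // Overflow is only ever set, never cleared, so a caller reusing the same
  // flag across several calls must reset it in between.
}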
  2114. /// Determine if any of the operands in this SCEV are a constant or if
  2115. /// any of the add or multiply expressions in this SCEV contain a constant.
  2116. static bool containsConstantSomewhere(const SCEV *StartExpr) {
  2117. SmallVector<const SCEV *, 4> Ops;
  2118. Ops.push_back(StartExpr);
  2119. while (!Ops.empty()) {
  2120. const SCEV *CurrentExpr = Ops.pop_back_val();
  2121. if (isa<SCEVConstant>(*CurrentExpr))
  2122. return true;
  2123. if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
  2124. const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
  2125. Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
  2126. }
  2127. }
  2128. return false;
  2129. }
  2130. /// getMulExpr - Get a canonical multiply expression, or something simpler if
  2131. /// possible.
  2132. const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
  2133. SCEV::NoWrapFlags Flags) {
  2134. assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
  2135. "only nuw or nsw allowed");
  2136. assert(!Ops.empty() && "Cannot get empty mul!");
  2137. if (Ops.size() == 1) return Ops[0];
  2138. #ifndef NDEBUG
  2139. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2140. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2141. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2142. "SCEVMulExpr operand types don't match!");
  2143. #endif
  2144. Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
  2145. // Sort by complexity, this groups all similar expression types together.
  2146. GroupByComplexity(Ops, LI);
  2147. // If there are any constants, fold them together.
  2148. unsigned Idx = 0;
  2149. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2150. // C1*(C2+V) -> C1*C2 + C1*V
  2151. if (Ops.size() == 2)
  2152. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
  2153. // If any of Add's ops are Adds or Muls with a constant,
  2154. // apply this transformation as well.
  2155. if (Add->getNumOperands() == 2)
  2156. if (containsConstantSomewhere(Add))
  2157. return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
  2158. getMulExpr(LHSC, Add->getOperand(1)));
  2159. ++Idx;
  2160. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2161. // We found two constants, fold them together!
  2162. ConstantInt *Fold = ConstantInt::get(getContext(),
  2163. LHSC->getValue()->getValue() *
  2164. RHSC->getValue()->getValue());
  2165. Ops[0] = getConstant(Fold);
  2166. Ops.erase(Ops.begin()+1); // Erase the folded element
  2167. if (Ops.size() == 1) return Ops[0];
  2168. LHSC = cast<SCEVConstant>(Ops[0]);
  2169. }
  2170. // If we are left with a constant one being multiplied, strip it off.
  2171. if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
  2172. Ops.erase(Ops.begin());
  2173. --Idx;
  2174. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
  2175. // If we have a multiply of zero, it will always be zero.
  2176. return Ops[0];
  2177. } else if (Ops[0]->isAllOnesValue()) {
  2178. // If we have a mul by -1 of an add, try distributing the -1 among the
  2179. // add operands.
  2180. if (Ops.size() == 2) {
  2181. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
  2182. SmallVector<const SCEV *, 4> NewOps;
  2183. bool AnyFolded = false;
2184. for (SCEVAddExpr::op_iterator I = Add->op_begin(),
  2185. E = Add->op_end(); I != E; ++I) {
  2186. const SCEV *Mul = getMulExpr(Ops[0], *I);
  2187. if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
  2188. NewOps.push_back(Mul);
  2189. }
  2190. if (AnyFolded)
  2191. return getAddExpr(NewOps);
  2192. }
  2193. else if (const SCEVAddRecExpr *
  2194. AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
  2195. // Negation preserves a recurrence's no self-wrap property.
  2196. SmallVector<const SCEV *, 4> Operands;
  2197. for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
  2198. E = AddRec->op_end(); I != E; ++I) {
  2199. Operands.push_back(getMulExpr(Ops[0], *I));
  2200. }
  2201. return getAddRecExpr(Operands, AddRec->getLoop(),
  2202. AddRec->getNoWrapFlags(SCEV::FlagNW));
  2203. }
  2204. }
  2205. }
  2206. if (Ops.size() == 1)
  2207. return Ops[0];
  2208. }
  2209. // Skip over the add expression until we get to a multiply.
  2210. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
  2211. ++Idx;
  2212. // If there are mul operands inline them all into this expression.
  2213. if (Idx < Ops.size()) {
  2214. bool DeletedMul = false;
  2215. while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2216. // If we have a mul, expand the mul operands onto the end of the operands
  2217. // list.
  2218. Ops.erase(Ops.begin()+Idx);
  2219. Ops.append(Mul->op_begin(), Mul->op_end());
  2220. DeletedMul = true;
  2221. }
  2222. // If we deleted at least one mul, we added operands to the end of the list,
  2223. // and they are not necessarily sorted. Recurse to resort and resimplify
  2224. // any operands we just acquired.
  2225. if (DeletedMul)
  2226. return getMulExpr(Ops);
  2227. }
  2228. // If there are any add recurrences in the operands list, see if any other
2229. // multiplied values are loop invariant. If so, we can fold them into the
  2230. // recurrence.
  2231. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
  2232. ++Idx;
  2233. // Scan over all recurrences, trying to fold loop invariants into them.
  2234. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
  2235. // Scan all of the other operands to this mul and add them to the vector if
  2236. // they are loop invariant w.r.t. the recurrence.
  2237. SmallVector<const SCEV *, 8> LIOps;
  2238. const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
  2239. const Loop *AddRecLoop = AddRec->getLoop();
  2240. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2241. if (isLoopInvariant(Ops[i], AddRecLoop)) {
  2242. LIOps.push_back(Ops[i]);
  2243. Ops.erase(Ops.begin()+i);
  2244. --i; --e;
  2245. }
  2246. // If we found some loop invariants, fold them into the recurrence.
  2247. if (!LIOps.empty()) {
  2248. // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
  2249. SmallVector<const SCEV *, 4> NewOps;
  2250. NewOps.reserve(AddRec->getNumOperands());
  2251. const SCEV *Scale = getMulExpr(LIOps);
  2252. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
  2253. NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
  2254. // Build the new addrec. Propagate the NUW and NSW flags if both the
  2255. // outer mul and the inner addrec are guaranteed to have no overflow.
  2256. //
  2257. // No self-wrap cannot be guaranteed after changing the step size, but
  2258. // will be inferred if either NUW or NSW is true.
  2259. Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
  2260. const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
  2261. // If all of the other operands were loop invariant, we are done.
  2262. if (Ops.size() == 1) return NewRec;
  2263. // Otherwise, multiply the folded AddRec by the non-invariant parts.
  2264. for (unsigned i = 0;; ++i)
  2265. if (Ops[i] == AddRec) {
  2266. Ops[i] = NewRec;
  2267. break;
  2268. }
  2269. return getMulExpr(Ops);
  2270. }
  2271. // Okay, if there weren't any loop invariants to be folded, check to see if
  2272. // there are multiple AddRec's with the same loop induction variable being
  2273. // multiplied together. If so, we can fold them.
  2274. // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
  2275. // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2276. // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
  2277. // ]]],+,...up to x=2n}.
  2278. // Note that the arguments to choose() are always integers with values
  2279. // known at compile time, never SCEV objects.
  2280. //
  2281. // The implementation avoids pointless extra computations when the two
  2282. // addrec's are of different length (mathematically, it's equivalent to
  2283. // an infinite stream of zeros on the right).
  2284. bool OpsModified = false;
  2285. for (unsigned OtherIdx = Idx+1;
  2286. OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  2287. ++OtherIdx) {
  2288. const SCEVAddRecExpr *OtherAddRec =
  2289. dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
  2290. if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
  2291. continue;
  2292. bool Overflow = false;
  2293. Type *Ty = AddRec->getType();
  2294. bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
  2295. SmallVector<const SCEV*, 7> AddRecOps;
  2296. for (int x = 0, xe = AddRec->getNumOperands() +
  2297. OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
  2298. const SCEV *Term = getConstant(Ty, 0);
  2299. for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
  2300. uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
  2301. for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
  2302. ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
  2303. z < ze && !Overflow; ++z) {
  2304. uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
  2305. uint64_t Coeff;
  2306. if (LargerThan64Bits)
  2307. Coeff = umul_ov(Coeff1, Coeff2, Overflow);
  2308. else
  2309. Coeff = Coeff1*Coeff2;
  2310. const SCEV *CoeffTerm = getConstant(Ty, Coeff);
  2311. const SCEV *Term1 = AddRec->getOperand(y-z);
  2312. const SCEV *Term2 = OtherAddRec->getOperand(z);
  2313. Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
  2314. }
  2315. }
  2316. AddRecOps.push_back(Term);
  2317. }
  2318. if (!Overflow) {
  2319. const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
  2320. SCEV::FlagAnyWrap);
  2321. if (Ops.size() == 2) return NewAddRec;
  2322. Ops[Idx] = NewAddRec;
  2323. Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
  2324. OpsModified = true;
  2325. AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
  2326. if (!AddRec)
  2327. break;
  2328. }
  2329. }
  2330. if (OpsModified)
  2331. return getMulExpr(Ops);
  2332. // Otherwise couldn't fold anything into this recurrence. Move onto the
  2333. // next one.
  2334. }
  2335. // Okay, it looks like we really DO need an mul expr. Check to see if we
  2336. // already have one, otherwise create a new one.
  2337. FoldingSetNodeID ID;
  2338. ID.AddInteger(scMulExpr);
  2339. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2340. ID.AddPointer(Ops[i]);
  2341. void *IP = nullptr;
  2342. SCEVMulExpr *S =
  2343. static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2344. if (!S) {
  2345. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2346. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2347. S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
  2348. O, Ops.size());
  2349. UniqueSCEVs.InsertNode(S, IP);
  2350. }
  2351. S->setNoWrapFlags(Flags);
  2352. return S;
  2353. }
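// A minimal usage sketch (illustrative, not part of the original file) of the
// constant handling above. 'SE' and an integer-typed SCEV 'X' are assumed to
// come from elsewhere; the function name is hypothetical.
static void mulExprFoldingExample(ScalarEvolution &SE, const SCEV *X) {
  Type *Ty = X->getType();
  // Two constants fold into one: 3 * 4 becomes the constant 12.
  const SCEV *Twelve = SE.getMulExpr(SE.getConstant(Ty, 3), SE.getConstant(Ty, 4));
  // A unit factor is stripped and a zero factor short-circuits the result.
  const SCEV *SameX = SE.getMulExpr(SE.getConstant(Ty, 1), X); // 1 * X --> X
  const SCEV *Zero = SE.getMulExpr(SE.getConstant(Ty, 0), X);  // 0 * X --> 0
  (void)Twelve;
  (void)SameX;
  (void)Zero;
}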
  2354. /// getUDivExpr - Get a canonical unsigned division expression, or something
  2355. /// simpler if possible.
  2356. const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
  2357. const SCEV *RHS) {
  2358. assert(getEffectiveSCEVType(LHS->getType()) ==
  2359. getEffectiveSCEVType(RHS->getType()) &&
  2360. "SCEVUDivExpr operand types don't match!");
  2361. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
  2362. if (RHSC->getValue()->equalsInt(1))
  2363. return LHS; // X udiv 1 --> x
  2364. // If the denominator is zero, the result of the udiv is undefined. Don't
  2365. // try to analyze it, because the resolution chosen here may differ from
  2366. // the resolution chosen in other parts of the compiler.
  2367. if (!RHSC->getValue()->isZero()) {
2368. // Determine if the division can be folded into the operands of
2369. // its left operand (LHS).
  2370. // TODO: Generalize this to non-constants by using known-bits information.
  2371. Type *Ty = LHS->getType();
  2372. unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
  2373. unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
  2374. // For non-power-of-two values, effectively round the value up to the
  2375. // nearest power of two.
  2376. if (!RHSC->getValue()->getValue().isPowerOf2())
  2377. ++MaxShiftAmt;
  2378. IntegerType *ExtTy =
  2379. IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
  2380. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
  2381. if (const SCEVConstant *Step =
  2382. dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
  2383. // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
  2384. const APInt &StepInt = Step->getValue()->getValue();
  2385. const APInt &DivInt = RHSC->getValue()->getValue();
  2386. if (!StepInt.urem(DivInt) &&
  2387. getZeroExtendExpr(AR, ExtTy) ==
  2388. getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
  2389. getZeroExtendExpr(Step, ExtTy),
  2390. AR->getLoop(), SCEV::FlagAnyWrap)) {
  2391. SmallVector<const SCEV *, 4> Operands;
  2392. for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
  2393. Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
  2394. return getAddRecExpr(Operands, AR->getLoop(),
  2395. SCEV::FlagNW);
  2396. }
2397. // Get a canonical UDivExpr for a recurrence.
2398. // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
  2399. // We can currently only fold X%N if X is constant.
  2400. const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
  2401. if (StartC && !DivInt.urem(StepInt) &&
  2402. getZeroExtendExpr(AR, ExtTy) ==
  2403. getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
  2404. getZeroExtendExpr(Step, ExtTy),
  2405. AR->getLoop(), SCEV::FlagAnyWrap)) {
  2406. const APInt &StartInt = StartC->getValue()->getValue();
  2407. const APInt &StartRem = StartInt.urem(StepInt);
  2408. if (StartRem != 0)
  2409. LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
  2410. AR->getLoop(), SCEV::FlagNW);
  2411. }
  2412. }
  2413. // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
  2414. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
  2415. SmallVector<const SCEV *, 4> Operands;
  2416. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
  2417. Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
  2418. if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
  2419. // Find an operand that's safely divisible.
  2420. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
  2421. const SCEV *Op = M->getOperand(i);
  2422. const SCEV *Div = getUDivExpr(Op, RHSC);
  2423. if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
  2424. Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
  2425. M->op_end());
  2426. Operands[i] = Div;
  2427. return getMulExpr(Operands);
  2428. }
  2429. }
  2430. }
  2431. // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
  2432. if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
  2433. SmallVector<const SCEV *, 4> Operands;
  2434. for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
  2435. Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
  2436. if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
  2437. Operands.clear();
  2438. for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
  2439. const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
  2440. if (isa<SCEVUDivExpr>(Op) ||
  2441. getMulExpr(Op, RHS) != A->getOperand(i))
  2442. break;
  2443. Operands.push_back(Op);
  2444. }
  2445. if (Operands.size() == A->getNumOperands())
  2446. return getAddExpr(Operands);
  2447. }
  2448. }
  2449. // Fold if both operands are constant.
  2450. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
  2451. Constant *LHSCV = LHSC->getValue();
  2452. Constant *RHSCV = RHSC->getValue();
  2453. return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
  2454. RHSCV)));
  2455. }
  2456. }
  2457. }
  2458. FoldingSetNodeID ID;
  2459. ID.AddInteger(scUDivExpr);
  2460. ID.AddPointer(LHS);
  2461. ID.AddPointer(RHS);
  2462. void *IP = nullptr;
  2463. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2464. SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
  2465. LHS, RHS);
  2466. UniqueSCEVs.InsertNode(S, IP);
  2467. return S;
  2468. }
  2469. static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  2470. APInt A = C1->getValue()->getValue().abs();
  2471. APInt B = C2->getValue()->getValue().abs();
  2472. uint32_t ABW = A.getBitWidth();
  2473. uint32_t BBW = B.getBitWidth();
  2474. if (ABW > BBW)
  2475. B = B.zext(ABW);
  2476. else if (ABW < BBW)
  2477. A = A.zext(BBW);
  2478. return APIntOps::GreatestCommonDivisor(A, B);
  2479. }
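// Illustrative sketch (not part of the original file): gcd() zero-extends the
// narrower constant so both APInts share a bit width before computing the
// greatest common divisor. 'SE' and the helper name are assumptions.
static void gcdExample(ScalarEvolution &SE) {
  const SCEVConstant *A = cast<SCEVConstant>(SE.getConstant(APInt(32, 12)));
  const SCEVConstant *B = cast<SCEVConstant>(SE.getConstant(APInt(64, 20)));
  // A is widened from 32 to 64 bits; gcd(12, 20) is 4.
  APInt G = gcd(A, B);
  assert(G.getBitWidth() == 64 && G == 4 && "common factor of 12 and 20");
  (void)G;
}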
  2480. /// getUDivExactExpr - Get a canonical unsigned division expression, or
  2481. /// something simpler if possible. There is no representation for an exact udiv
  2482. /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
  2483. /// We can't do this when it's not exact because the udiv may be clearing bits.
  2484. const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
  2485. const SCEV *RHS) {
  2486. // TODO: we could try to find factors in all sorts of things, but for now we
  2487. // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  2488. // end of this file for inspiration.
  2489. const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  2490. if (!Mul)
  2491. return getUDivExpr(LHS, RHS);
  2492. if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
  2493. // If the mulexpr multiplies by a constant, then that constant must be the
  2494. // first element of the mulexpr.
  2495. if (const SCEVConstant *LHSCst =
  2496. dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
  2497. if (LHSCst == RHSCst) {
  2498. SmallVector<const SCEV *, 2> Operands;
  2499. Operands.append(Mul->op_begin() + 1, Mul->op_end());
  2500. return getMulExpr(Operands);
  2501. }
2502. // We can't just assume that LHSCst divides RHSCst cleanly; it could be
  2503. // that there's a factor provided by one of the other terms. We need to
  2504. // check.
  2505. APInt Factor = gcd(LHSCst, RHSCst);
  2506. if (!Factor.isIntN(1)) {
  2507. LHSCst = cast<SCEVConstant>(
  2508. getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
  2509. RHSCst = cast<SCEVConstant>(
  2510. getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
  2511. SmallVector<const SCEV *, 2> Operands;
  2512. Operands.push_back(LHSCst);
  2513. Operands.append(Mul->op_begin() + 1, Mul->op_end());
  2514. LHS = getMulExpr(Operands);
  2515. RHS = RHSCst;
  2516. Mul = dyn_cast<SCEVMulExpr>(LHS);
  2517. if (!Mul)
  2518. return getUDivExactExpr(LHS, RHS);
  2519. }
  2520. }
  2521. }
  2522. for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
  2523. if (Mul->getOperand(i) == RHS) {
  2524. SmallVector<const SCEV *, 2> Operands;
  2525. Operands.append(Mul->op_begin(), Mul->op_begin() + i);
  2526. Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
  2527. return getMulExpr(Operands);
  2528. }
  2529. }
  2530. return getUDivExpr(LHS, RHS);
  2531. }
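// Worked examples (illustrative, not from the original file) of the two
// rewrites above. If LHS is (%x * %y) and RHS is %y, the matching operand is
// simply dropped and the exact division folds to %x. If LHS is (6 * %x) and
// RHS is 4, neither constant divides the other, but gcd(6, 4) = 2 lets the
// common factor be cancelled first, leaving (3 * %x) /u 2 as an ordinary
// udiv. Both rewrites lean on the exactness guarantee described above; a
// plain udiv could be clearing low bits.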
  2532. /// getAddRecExpr - Get an add recurrence expression for the specified loop.
  2533. /// Simplify the expression as much as possible.
  2534. const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
  2535. const Loop *L,
  2536. SCEV::NoWrapFlags Flags) {
  2537. SmallVector<const SCEV *, 4> Operands;
  2538. Operands.push_back(Start);
  2539. if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
  2540. if (StepChrec->getLoop() == L) {
  2541. Operands.append(StepChrec->op_begin(), StepChrec->op_end());
  2542. return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
  2543. }
  2544. Operands.push_back(Step);
  2545. return getAddRecExpr(Operands, L, Flags);
  2546. }
  2547. /// getAddRecExpr - Get an add recurrence expression for the specified loop.
  2548. /// Simplify the expression as much as possible.
  2549. const SCEV *
  2550. ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
  2551. const Loop *L, SCEV::NoWrapFlags Flags) {
  2552. if (Operands.size() == 1) return Operands[0];
  2553. #ifndef NDEBUG
  2554. Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  2555. for (unsigned i = 1, e = Operands.size(); i != e; ++i)
  2556. assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
  2557. "SCEVAddRecExpr operand types don't match!");
  2558. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2559. assert(isLoopInvariant(Operands[i], L) &&
  2560. "SCEVAddRecExpr operand is not loop-invariant!");
  2561. #endif
  2562. if (Operands.back()->isZero()) {
  2563. Operands.pop_back();
  2564. return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  2565. }
2566. // It's tempting to want to call getMaxBackedgeTakenCount here and
  2567. // use that information to infer NUW and NSW flags. However, computing a
  2568. // BE count requires calling getAddRecExpr, so we may not yet have a
  2569. // meaningful BE count at this point (and if we don't, we'd be stuck
  2570. // with a SCEVCouldNotCompute as the cached BE count).
  2571. Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
2572. // Canonicalize nested AddRecs by nesting them in order of loop depth.
  2573. if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
  2574. const Loop *NestedLoop = NestedAR->getLoop();
  2575. if (L->contains(NestedLoop) ?
  2576. (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
  2577. (!NestedLoop->contains(L) &&
  2578. DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
  2579. SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
  2580. NestedAR->op_end());
  2581. Operands[0] = NestedAR->getStart();
  2582. // AddRecs require their operands be loop-invariant with respect to their
  2583. // loops. Don't perform this transformation if it would break this
  2584. // requirement.
  2585. bool AllInvariant = true;
  2586. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2587. if (!isLoopInvariant(Operands[i], L)) {
  2588. AllInvariant = false;
  2589. break;
  2590. }
  2591. if (AllInvariant) {
  2592. // Create a recurrence for the outer loop with the same step size.
  2593. //
  2594. // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
  2595. // inner recurrence has the same property.
  2596. SCEV::NoWrapFlags OuterFlags =
  2597. maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
  2598. NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
  2599. AllInvariant = true;
  2600. for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
  2601. if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
  2602. AllInvariant = false;
  2603. break;
  2604. }
  2605. if (AllInvariant) {
  2606. // Ok, both add recurrences are valid after the transformation.
  2607. //
  2608. // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
  2609. // the outer recurrence has the same property.
  2610. SCEV::NoWrapFlags InnerFlags =
  2611. maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
  2612. return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
  2613. }
  2614. }
  2615. // Reset Operands to its original state.
  2616. Operands[0] = NestedAR;
  2617. }
  2618. }
  2619. // Okay, it looks like we really DO need an addrec expr. Check to see if we
  2620. // already have one, otherwise create a new one.
  2621. FoldingSetNodeID ID;
  2622. ID.AddInteger(scAddRecExpr);
  2623. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2624. ID.AddPointer(Operands[i]);
  2625. ID.AddPointer(L);
  2626. void *IP = nullptr;
  2627. SCEVAddRecExpr *S =
  2628. static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2629. if (!S) {
  2630. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
  2631. std::uninitialized_copy(Operands.begin(), Operands.end(), O);
  2632. S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
  2633. O, Operands.size(), L);
  2634. UniqueSCEVs.InsertNode(S, IP);
  2635. }
  2636. S->setNoWrapFlags(Flags);
  2637. return S;
  2638. }
  2639. const SCEV *
  2640. ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
  2641. const SmallVectorImpl<const SCEV *> &IndexExprs,
  2642. bool InBounds) {
  2643. // getSCEV(Base)->getType() has the same address space as Base->getType()
  2644. // because SCEV::getType() preserves the address space.
  2645. Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  2646. // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  2647. // instruction to its SCEV, because the Instruction may be guarded by control
  2648. // flow and the no-overflow bits may not be valid for the expression in any
  2649. // context.
  2650. SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  2651. const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
2652. // The address space is unimportant; the first thing we do with CurTy is get
2653. // its element type.
  2654. Type *CurTy = PointerType::getUnqual(PointeeType);
  2655. for (const SCEV *IndexExpr : IndexExprs) {
  2656. // Compute the (potentially symbolic) offset in bytes for this index.
  2657. if (StructType *STy = dyn_cast<StructType>(CurTy)) {
  2658. // For a struct, add the member offset.
  2659. ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
  2660. unsigned FieldNo = Index->getZExtValue();
  2661. const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
  2662. // Add the field offset to the running total offset.
  2663. TotalOffset = getAddExpr(TotalOffset, FieldOffset);
  2664. // Update CurTy to the type of the field at Index.
  2665. CurTy = STy->getTypeAtIndex(Index);
  2666. } else {
  2667. // Update CurTy to its element type.
  2668. CurTy = cast<SequentialType>(CurTy)->getElementType();
  2669. // For an array, add the element offset, explicitly scaled.
  2670. const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
  2671. // Getelementptr indices are signed.
  2672. IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
  2673. // Multiply the index by the element size to compute the element offset.
  2674. const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
  2675. // Add the element offset to the running total offset.
  2676. TotalOffset = getAddExpr(TotalOffset, LocalOffset);
  2677. }
  2678. }
  2679. // Add the total offset from all the GEP indices to the base.
  2680. return getAddExpr(BaseExpr, TotalOffset, Wrap);
  2681. }
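// Worked example (illustrative, not from the original file): for a GEP such as
//   getelementptr %S, %S* %base, i64 %i, i32 1, i64 %j
// with %S = { i32, [10 x i64] }, the loop above accumulates
//   TotalOffset = %i * sizeof(%S)        ; sequential index scaled by size
//               + offsetof(%S, field 1)  ; struct member offset
//               + %j * sizeof(i64)       ; index into the inner array
// and the final result is BaseExpr + TotalOffset. The multiplies and the
// outer add carry NSW only when the original GEP was inbounds.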
  2682. const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
  2683. const SCEV *RHS) {
  2684. SmallVector<const SCEV *, 2> Ops;
  2685. Ops.push_back(LHS);
  2686. Ops.push_back(RHS);
  2687. return getSMaxExpr(Ops);
  2688. }
  2689. const SCEV *
  2690. ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  2691. assert(!Ops.empty() && "Cannot get empty smax!");
  2692. if (Ops.size() == 1) return Ops[0];
  2693. #ifndef NDEBUG
  2694. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2695. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2696. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2697. "SCEVSMaxExpr operand types don't match!");
  2698. #endif
  2699. // Sort by complexity, this groups all similar expression types together.
  2700. GroupByComplexity(Ops, LI);
  2701. // If there are any constants, fold them together.
  2702. unsigned Idx = 0;
  2703. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2704. ++Idx;
  2705. assert(Idx < Ops.size());
  2706. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2707. // We found two constants, fold them together!
  2708. ConstantInt *Fold = ConstantInt::get(getContext(),
  2709. APIntOps::smax(LHSC->getValue()->getValue(),
  2710. RHSC->getValue()->getValue()));
  2711. Ops[0] = getConstant(Fold);
  2712. Ops.erase(Ops.begin()+1); // Erase the folded element
  2713. if (Ops.size() == 1) return Ops[0];
  2714. LHSC = cast<SCEVConstant>(Ops[0]);
  2715. }
  2716. // If we are left with a constant minimum-int, strip it off.
  2717. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
  2718. Ops.erase(Ops.begin());
  2719. --Idx;
  2720. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
  2721. // If we have an smax with a constant maximum-int, it will always be
  2722. // maximum-int.
  2723. return Ops[0];
  2724. }
  2725. if (Ops.size() == 1) return Ops[0];
  2726. }
  2727. // Find the first SMax
  2728. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
  2729. ++Idx;
  2730. // Check to see if one of the operands is an SMax. If so, expand its operands
  2731. // onto our operand list, and recurse to simplify.
  2732. if (Idx < Ops.size()) {
  2733. bool DeletedSMax = false;
  2734. while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
  2735. Ops.erase(Ops.begin()+Idx);
  2736. Ops.append(SMax->op_begin(), SMax->op_end());
  2737. DeletedSMax = true;
  2738. }
  2739. if (DeletedSMax)
  2740. return getSMaxExpr(Ops);
  2741. }
  2742. // Okay, check to see if the same value occurs in the operand list twice. If
  2743. // so, delete one. Since we sorted the list, these values are required to
  2744. // be adjacent.
  2745. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
  2746. // X smax Y smax Y --> X smax Y
2747. // X smax Y --> X, if X is always greater than or equal to Y
  2748. if (Ops[i] == Ops[i+1] ||
  2749. isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
  2750. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
  2751. --i; --e;
  2752. } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
  2753. Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
  2754. --i; --e;
  2755. }
  2756. if (Ops.size() == 1) return Ops[0];
  2757. assert(!Ops.empty() && "Reduced smax down to nothing!");
  2758. // Okay, it looks like we really DO need an smax expr. Check to see if we
  2759. // already have one, otherwise create a new one.
  2760. FoldingSetNodeID ID;
  2761. ID.AddInteger(scSMaxExpr);
  2762. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2763. ID.AddPointer(Ops[i]);
  2764. void *IP = nullptr;
  2765. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2766. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2767. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2768. SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
  2769. O, Ops.size());
  2770. UniqueSCEVs.InsertNode(S, IP);
  2771. return S;
  2772. }
  2773. const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
  2774. const SCEV *RHS) {
  2775. SmallVector<const SCEV *, 2> Ops;
  2776. Ops.push_back(LHS);
  2777. Ops.push_back(RHS);
  2778. return getUMaxExpr(Ops);
  2779. }
  2780. const SCEV *
  2781. ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  2782. assert(!Ops.empty() && "Cannot get empty umax!");
  2783. if (Ops.size() == 1) return Ops[0];
  2784. #ifndef NDEBUG
  2785. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2786. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2787. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2788. "SCEVUMaxExpr operand types don't match!");
  2789. #endif
  2790. // Sort by complexity, this groups all similar expression types together.
  2791. GroupByComplexity(Ops, LI);
  2792. // If there are any constants, fold them together.
  2793. unsigned Idx = 0;
  2794. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2795. ++Idx;
  2796. assert(Idx < Ops.size());
  2797. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2798. // We found two constants, fold them together!
  2799. ConstantInt *Fold = ConstantInt::get(getContext(),
  2800. APIntOps::umax(LHSC->getValue()->getValue(),
  2801. RHSC->getValue()->getValue()));
  2802. Ops[0] = getConstant(Fold);
  2803. Ops.erase(Ops.begin()+1); // Erase the folded element
  2804. if (Ops.size() == 1) return Ops[0];
  2805. LHSC = cast<SCEVConstant>(Ops[0]);
  2806. }
  2807. // If we are left with a constant minimum-int, strip it off.
  2808. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
  2809. Ops.erase(Ops.begin());
  2810. --Idx;
  2811. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
  2812. // If we have an umax with a constant maximum-int, it will always be
  2813. // maximum-int.
  2814. return Ops[0];
  2815. }
  2816. if (Ops.size() == 1) return Ops[0];
  2817. }
  2818. // Find the first UMax
  2819. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
  2820. ++Idx;
  2821. // Check to see if one of the operands is a UMax. If so, expand its operands
  2822. // onto our operand list, and recurse to simplify.
  2823. if (Idx < Ops.size()) {
  2824. bool DeletedUMax = false;
  2825. while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
  2826. Ops.erase(Ops.begin()+Idx);
  2827. Ops.append(UMax->op_begin(), UMax->op_end());
  2828. DeletedUMax = true;
  2829. }
  2830. if (DeletedUMax)
  2831. return getUMaxExpr(Ops);
  2832. }
  2833. // Okay, check to see if the same value occurs in the operand list twice. If
  2834. // so, delete one. Since we sorted the list, these values are required to
  2835. // be adjacent.
  2836. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
  2837. // X umax Y umax Y --> X umax Y
2838. // X umax Y --> X, if X is always greater than or equal to Y
  2839. if (Ops[i] == Ops[i+1] ||
  2840. isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
  2841. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
  2842. --i; --e;
  2843. } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
  2844. Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
  2845. --i; --e;
  2846. }
  2847. if (Ops.size() == 1) return Ops[0];
  2848. assert(!Ops.empty() && "Reduced umax down to nothing!");
  2849. // Okay, it looks like we really DO need a umax expr. Check to see if we
  2850. // already have one, otherwise create a new one.
  2851. FoldingSetNodeID ID;
  2852. ID.AddInteger(scUMaxExpr);
  2853. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2854. ID.AddPointer(Ops[i]);
  2855. void *IP = nullptr;
  2856. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2857. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2858. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2859. SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
  2860. O, Ops.size());
  2861. UniqueSCEVs.InsertNode(S, IP);
  2862. return S;
  2863. }
  2864. const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
  2865. const SCEV *RHS) {
  2866. // ~smax(~x, ~y) == smin(x, y).
  2867. return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
  2868. }
  2869. const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
  2870. const SCEV *RHS) {
  2871. // ~umax(~x, ~y) == umin(x, y)
  2872. return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
  2873. }
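// Why the complement trick above works (illustrative note, not from the
// original file): ~x is -1 - x, and x |-> -1 - x is an order-reversing
// bijection on both the signed and the unsigned range of a fixed bit width.
// Hence smax(~x, ~y) = -1 - smin(x, y), and complementing once more gives
// ~smax(~x, ~y) = smin(x, y); the identical argument with umax yields umin.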
  2874. const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  2875. // We can bypass creating a target-independent
  2876. // constant expression and then folding it back into a ConstantInt.
  2877. // This is just a compile-time optimization.
  2878. return getConstant(IntTy,
  2879. F->getParent()->getDataLayout().getTypeAllocSize(AllocTy));
  2880. }
  2881. const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
  2882. StructType *STy,
  2883. unsigned FieldNo) {
  2884. // We can bypass creating a target-independent
  2885. // constant expression and then folding it back into a ConstantInt.
  2886. // This is just a compile-time optimization.
  2887. return getConstant(
  2888. IntTy,
  2889. F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
  2890. FieldNo));
  2891. }
  2892. const SCEV *ScalarEvolution::getUnknown(Value *V) {
  2893. // Don't attempt to do anything other than create a SCEVUnknown object
  2894. // here. createSCEV only calls getUnknown after checking for all other
  2895. // interesting possibilities, and any other code that calls getUnknown
  2896. // is doing so in order to hide a value from SCEV canonicalization.
  2897. FoldingSetNodeID ID;
  2898. ID.AddInteger(scUnknown);
  2899. ID.AddPointer(V);
  2900. void *IP = nullptr;
  2901. if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
  2902. assert(cast<SCEVUnknown>(S)->getValue() == V &&
  2903. "Stale SCEVUnknown in uniquing map!");
  2904. return S;
  2905. }
  2906. SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
  2907. FirstUnknown);
  2908. FirstUnknown = cast<SCEVUnknown>(S);
  2909. UniqueSCEVs.InsertNode(S, IP);
  2910. return S;
  2911. }
  2912. //===----------------------------------------------------------------------===//
  2913. // Basic SCEV Analysis and PHI Idiom Recognition Code
  2914. //
  2915. /// isSCEVable - Test if values of the given type are analyzable within
  2916. /// the SCEV framework. This primarily includes integer types, and it
  2917. /// can optionally include pointer types if the ScalarEvolution class
  2918. /// has access to target-specific information.
  2919. bool ScalarEvolution::isSCEVable(Type *Ty) const {
  2920. // Integers and pointers are always SCEVable.
  2921. return Ty->isIntegerTy() || Ty->isPointerTy();
  2922. }
  2923. /// getTypeSizeInBits - Return the size in bits of the specified type,
  2924. /// for which isSCEVable must return true.
  2925. uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  2926. assert(isSCEVable(Ty) && "Type is not SCEVable!");
  2927. return F->getParent()->getDataLayout().getTypeSizeInBits(Ty);
  2928. }
  2929. /// getEffectiveSCEVType - Return a type with the same bitwidth as
  2930. /// the given type and which represents how SCEV will treat the given
  2931. /// type, for which isSCEVable must return true. For pointer types,
  2932. /// this is the pointer-sized integer type.
  2933. Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  2934. assert(isSCEVable(Ty) && "Type is not SCEVable!");
  2935. if (Ty->isIntegerTy()) {
  2936. return Ty;
  2937. }
2938. // The only other supported type is pointer.
  2939. assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  2940. return F->getParent()->getDataLayout().getIntPtrType(Ty);
  2941. }
  2942. const SCEV *ScalarEvolution::getCouldNotCompute() {
  2943. return &CouldNotCompute;
  2944. }
  2945. namespace {
  2946. // Helper class working with SCEVTraversal to figure out if a SCEV contains
2947. // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
2948. // is set iff we find such a SCEVUnknown.
  2949. //
  2950. struct FindInvalidSCEVUnknown {
  2951. bool FindOne;
  2952. FindInvalidSCEVUnknown() { FindOne = false; }
  2953. bool follow(const SCEV *S) {
  2954. switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  2955. case scConstant:
  2956. return false;
  2957. case scUnknown:
  2958. if (!cast<SCEVUnknown>(S)->getValue())
  2959. FindOne = true;
  2960. return false;
  2961. default:
  2962. return true;
  2963. }
  2964. }
  2965. bool isDone() const { return FindOne; }
  2966. };
  2967. }
  2968. bool ScalarEvolution::checkValidity(const SCEV *S) const {
  2969. FindInvalidSCEVUnknown F;
  2970. SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
  2971. ST.visitAll(S);
  2972. return !F.FindOne;
  2973. }
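// A minimal sketch (not part of the original file) of another visitor using
// the same SCEVTraversal protocol as FindInvalidSCEVUnknown above: follow()
// decides whether to descend into an expression's operands and isDone() can
// stop the walk early. The struct and helper names are hypothetical.
namespace {
struct CountSCEVUnknowns {
  unsigned Count = 0;
  bool follow(const SCEV *S) {
    // Tally every SCEVUnknown node encountered during the walk.
    if (isa<SCEVUnknown>(S))
      ++Count;
    return true; // keep walking into operands
  }
  bool isDone() const { return false; } // never terminate early
};
}
static unsigned countSCEVUnknowns(const SCEV *S) {
  CountSCEVUnknowns C;
  SCEVTraversal<CountSCEVUnknowns> ST(C);
  ST.visitAll(S);
  return C.Count;
}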
  2974. /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
  2975. /// expression and create a new one.
  2976. const SCEV *ScalarEvolution::getSCEV(Value *V) {
  2977. assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
  2978. ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  2979. if (I != ValueExprMap.end()) {
  2980. const SCEV *S = I->second;
  2981. if (checkValidity(S))
  2982. return S;
  2983. else
  2984. ValueExprMap.erase(I);
  2985. }
  2986. const SCEV *S = createSCEV(V);
  2987. // The process of creating a SCEV for V may have caused other SCEVs
  2988. // to have been created, so it's necessary to insert the new entry
  2989. // from scratch, rather than trying to remember the insert position
  2990. // above.
  2991. ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  2992. return S;
  2993. }
  2994. /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
  2995. ///
  2996. const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  2997. if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
  2998. return getConstant(
  2999. cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
  3000. Type *Ty = V->getType();
  3001. Ty = getEffectiveSCEVType(Ty);
  3002. return getMulExpr(V,
  3003. getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
  3004. }
  3005. /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
  3006. const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  3007. if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
  3008. return getConstant(
  3009. cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
  3010. Type *Ty = V->getType();
  3011. Ty = getEffectiveSCEVType(Ty);
  3012. const SCEV *AllOnes =
  3013. getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  3014. return getMinusSCEV(AllOnes, V);
  3015. }
  3016. /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
  3017. const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
  3018. SCEV::NoWrapFlags Flags) {
  3019. assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
  3020. // Fast path: X - X --> 0.
  3021. if (LHS == RHS)
  3022. return getConstant(LHS->getType(), 0);
  3023. // X - Y --> X + -Y.
  3024. // X -(nsw || nuw) Y --> X + -Y.
  3025. return getAddExpr(LHS, getNegativeSCEV(RHS));
  3026. }
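// For example, getMinusSCEV(%a, %b) is built as (%a + (-1 * %b)); there is no
// dedicated subtraction node in the SCEV expression language.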
  3027. /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
  3028. /// input value to the specified type. If the type must be extended, it is zero
  3029. /// extended.
  3030. const SCEV *
  3031. ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  3032. Type *SrcTy = V->getType();
  3033. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3034. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3035. "Cannot truncate or zero extend with non-integer arguments!");
  3036. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3037. return V; // No conversion
  3038. if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
  3039. return getTruncateExpr(V, Ty);
  3040. return getZeroExtendExpr(V, Ty);
  3041. }
  3042. /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
  3043. /// input value to the specified type. If the type must be extended, it is sign
  3044. /// extended.
  3045. const SCEV *
  3046. ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
  3047. Type *Ty) {
  3048. Type *SrcTy = V->getType();
  3049. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3050. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or sign extend with non-integer arguments!");
  3052. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3053. return V; // No conversion
  3054. if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
  3055. return getTruncateExpr(V, Ty);
  3056. return getSignExtendExpr(V, Ty);
  3057. }
  3058. /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
  3059. /// input value to the specified type. If the type must be extended, it is zero
  3060. /// extended. The conversion must not be narrowing.
  3061. const SCEV *
  3062. ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  3063. Type *SrcTy = V->getType();
  3064. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3065. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3066. "Cannot noop or zero extend with non-integer arguments!");
  3067. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  3068. "getNoopOrZeroExtend cannot truncate!");
  3069. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3070. return V; // No conversion
  3071. return getZeroExtendExpr(V, Ty);
  3072. }
  3073. /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
  3074. /// input value to the specified type. If the type must be extended, it is sign
  3075. /// extended. The conversion must not be narrowing.
  3076. const SCEV *
  3077. ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  3078. Type *SrcTy = V->getType();
  3079. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3080. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3081. "Cannot noop or sign extend with non-integer arguments!");
  3082. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  3083. "getNoopOrSignExtend cannot truncate!");
  3084. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3085. return V; // No conversion
  3086. return getSignExtendExpr(V, Ty);
  3087. }
  3088. /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
  3089. /// the input value to the specified type. If the type must be extended,
  3090. /// it is extended with unspecified bits. The conversion must not be
  3091. /// narrowing.
  3092. const SCEV *
  3093. ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  3094. Type *SrcTy = V->getType();
  3095. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3096. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3097. "Cannot noop or any extend with non-integer arguments!");
  3098. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  3099. "getNoopOrAnyExtend cannot truncate!");
  3100. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3101. return V; // No conversion
  3102. return getAnyExtendExpr(V, Ty);
  3103. }
  3104. /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
  3105. /// input value to the specified type. The conversion must not be widening.
  3106. const SCEV *
  3107. ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  3108. Type *SrcTy = V->getType();
  3109. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3110. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3111. "Cannot truncate or noop with non-integer arguments!");
  3112. assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
  3113. "getTruncateOrNoop cannot extend!");
  3114. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3115. return V; // No conversion
  3116. return getTruncateExpr(V, Ty);
  3117. }
  3118. /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
  3119. /// the types using zero-extension, and then perform a umax operation
  3120. /// with them.
  3121. const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
  3122. const SCEV *RHS) {
  3123. const SCEV *PromotedLHS = LHS;
  3124. const SCEV *PromotedRHS = RHS;
  3125. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
  3126. PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  3127. else
  3128. PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
  3129. return getUMaxExpr(PromotedLHS, PromotedRHS);
  3130. }
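// For example, a umax of an i32 and an i64 operand first zero-extends the i32
// operand to i64 and then forms the umax over two i64 expressions.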
  3131. /// getUMinFromMismatchedTypes - Promote the operands to the wider of
  3132. /// the types using zero-extension, and then perform a umin operation
  3133. /// with them.
  3134. const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
  3135. const SCEV *RHS) {
  3136. const SCEV *PromotedLHS = LHS;
  3137. const SCEV *PromotedRHS = RHS;
  3138. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
  3139. PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  3140. else
  3141. PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
  3142. return getUMinExpr(PromotedLHS, PromotedRHS);
  3143. }
  3144. /// getPointerBase - Transitively follow the chain of pointer-type operands
  3145. /// until reaching a SCEV that does not have a single pointer operand. This
  3146. /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
  3147. /// but corner cases do exist.
  3148. const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  3149. // A pointer operand may evaluate to a nonpointer expression, such as null.
  3150. if (!V->getType()->isPointerTy())
  3151. return V;
  3152. if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
  3153. return getPointerBase(Cast->getOperand());
  3154. }
  3155. else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
  3156. const SCEV *PtrOp = nullptr;
  3157. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  3158. I != E; ++I) {
  3159. if ((*I)->getType()->isPointerTy()) {
  3160. // Cannot find the base of an expression with multiple pointer operands.
  3161. if (PtrOp)
  3162. return V;
  3163. PtrOp = *I;
  3164. }
  3165. }
  3166. if (!PtrOp)
  3167. return V;
  3168. return getPointerBase(PtrOp);
  3169. }
  3170. return V;
  3171. }
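// For example, for a GEP-derived expression such as ((4 * %i) + %base), where
// %base is the only pointer-typed operand, the recursion above returns the
// SCEVUnknown for %base.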
  3172. /// PushDefUseChildren - Push users of the given Instruction
  3173. /// onto the given Worklist.
  3174. static void
  3175. PushDefUseChildren(Instruction *I,
  3176. SmallVectorImpl<Instruction *> &Worklist) {
  3177. // Push the def-use children onto the Worklist stack.
  3178. for (User *U : I->users())
  3179. Worklist.push_back(cast<Instruction>(U));
  3180. }
/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMap if they reference SymName. This is used during PHI
  3184. /// resolution.
  3185. void
  3186. ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  3187. SmallVector<Instruction *, 16> Worklist;
  3188. PushDefUseChildren(PN, Worklist);
  3189. SmallPtrSet<Instruction *, 8> Visited;
  3190. Visited.insert(PN);
  3191. while (!Worklist.empty()) {
  3192. Instruction *I = Worklist.pop_back_val();
  3193. if (!Visited.insert(I).second)
  3194. continue;
  3195. ValueExprMapType::iterator It =
  3196. ValueExprMap.find_as(static_cast<Value *>(I));
  3197. if (It != ValueExprMap.end()) {
  3198. const SCEV *Old = It->second;
  3199. // Short-circuit the def-use traversal if the symbolic name
  3200. // ceases to appear in expressions.
  3201. if (Old != SymName && !hasOperand(Old, SymName))
  3202. continue;
  3203. // SCEVUnknown for a PHI either means that it has an unrecognized
// structure, it's a PHI that's in the process of being computed
  3205. // by createNodeForPHI, or it's a single-value PHI. In the first case,
  3206. // additional loop trip count information isn't going to change anything.
  3207. // In the second case, createNodeForPHI will perform the necessary
  3208. // updates on its own when it gets to that point. In the third, we do
  3209. // want to forget the SCEVUnknown.
  3210. if (!isa<PHINode>(I) ||
  3211. !isa<SCEVUnknown>(Old) ||
  3212. (I != PN && Old == SymName)) {
  3213. forgetMemoizedResults(Old);
  3214. ValueExprMap.erase(It);
  3215. }
  3216. }
  3217. PushDefUseChildren(I, Worklist);
  3218. }
  3219. }
  3220. /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
  3221. /// a loop header, making it a potential recurrence, or it doesn't.
  3222. ///
  3223. const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  3224. if (const Loop *L = LI->getLoopFor(PN->getParent()))
  3225. if (L->getHeader() == PN->getParent()) {
  3226. // The loop may have multiple entrances or multiple exits; we can analyze
  3227. // this phi as an addrec if it has a unique entry value and a unique
  3228. // backedge value.
  3229. Value *BEValueV = nullptr, *StartValueV = nullptr;
  3230. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
  3231. Value *V = PN->getIncomingValue(i);
  3232. if (L->contains(PN->getIncomingBlock(i))) {
  3233. if (!BEValueV) {
  3234. BEValueV = V;
  3235. } else if (BEValueV != V) {
  3236. BEValueV = nullptr;
  3237. break;
  3238. }
  3239. } else if (!StartValueV) {
  3240. StartValueV = V;
  3241. } else if (StartValueV != V) {
  3242. StartValueV = nullptr;
  3243. break;
  3244. }
  3245. }
  3246. if (BEValueV && StartValueV) {
  3247. // While we are analyzing this PHI node, handle its value symbolically.
  3248. const SCEV *SymbolicName = getUnknown(PN);
  3249. assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
  3250. "PHI node already processed?");
  3251. ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
  3252. // Using this symbolic name for the PHI, analyze the value coming around
  3253. // the back-edge.
  3254. const SCEV *BEValue = getSCEV(BEValueV);
  3255. // NOTE: If BEValue is loop invariant, we know that the PHI node just
  3256. // has a special value for the first iteration of the loop.
  3257. // If the value coming around the backedge is an add with the symbolic
  3258. // value we just inserted, then we found a simple induction variable!
  3259. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
  3260. // If there is a single occurrence of the symbolic value, replace it
  3261. // with a recurrence.
  3262. unsigned FoundIndex = Add->getNumOperands();
  3263. for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
  3264. if (Add->getOperand(i) == SymbolicName)
  3265. if (FoundIndex == e) {
  3266. FoundIndex = i;
  3267. break;
  3268. }
  3269. if (FoundIndex != Add->getNumOperands()) {
  3270. // Create an add with everything but the specified operand.
  3271. SmallVector<const SCEV *, 8> Ops;
  3272. for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
  3273. if (i != FoundIndex)
  3274. Ops.push_back(Add->getOperand(i));
  3275. const SCEV *Accum = getAddExpr(Ops);
  3276. // This is not a valid addrec if the step amount is varying each
  3277. // loop iteration, but is not itself an addrec in this loop.
  3278. if (isLoopInvariant(Accum, L) ||
  3279. (isa<SCEVAddRecExpr>(Accum) &&
  3280. cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
  3281. SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  3282. // If the increment doesn't overflow, then neither the addrec nor
  3283. // the post-increment will overflow.
  3284. if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
  3285. if (OBO->getOperand(0) == PN) {
  3286. if (OBO->hasNoUnsignedWrap())
  3287. Flags = setFlags(Flags, SCEV::FlagNUW);
  3288. if (OBO->hasNoSignedWrap())
  3289. Flags = setFlags(Flags, SCEV::FlagNSW);
  3290. }
  3291. } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
  3292. // If the increment is an inbounds GEP, then we know the address
  3293. // space cannot be wrapped around. We cannot make any guarantee
  3294. // about signed or unsigned overflow because pointers are
  3295. // unsigned but we may have a negative index from the base
  3296. // pointer. We can guarantee that no unsigned wrap occurs if the
  3297. // indices form a positive value.
  3298. if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
  3299. Flags = setFlags(Flags, SCEV::FlagNW);
  3300. const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
  3301. if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
  3302. Flags = setFlags(Flags, SCEV::FlagNUW);
  3303. }
  3304. // We cannot transfer nuw and nsw flags from subtraction
  3305. // operations -- sub nuw X, Y is not the same as add nuw X, -Y
  3306. // for instance.
  3307. }
  3308. const SCEV *StartVal = getSCEV(StartValueV);
  3309. const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
  3310. // Since the no-wrap flags are on the increment, they apply to the
  3311. // post-incremented value as well.
  3312. if (isLoopInvariant(Accum, L))
  3313. (void)getAddRecExpr(getAddExpr(StartVal, Accum),
  3314. Accum, L, Flags);
  3315. // Okay, for the entire analysis of this edge we assumed the PHI
  3316. // to be symbolic. We now need to go back and purge all of the
  3317. // entries for the scalars that use the symbolic expression.
  3318. ForgetSymbolicName(PN, SymbolicName);
  3319. ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
  3320. return PHISCEV;
  3321. }
  3322. }
  3323. } else if (const SCEVAddRecExpr *AddRec =
  3324. dyn_cast<SCEVAddRecExpr>(BEValue)) {
  3325. // Otherwise, this could be a loop like this:
  3326. // i = 0; for (j = 1; ..; ++j) { .... i = j; }
  3327. // In this case, j = {1,+,1} and BEValue is j.
// Because the other in-value of i (0) fits the evolution of BEValue,
  3329. // i really is an addrec evolution.
  3330. if (AddRec->getLoop() == L && AddRec->isAffine()) {
  3331. const SCEV *StartVal = getSCEV(StartValueV);
  3332. // If StartVal = j.start - j.stride, we can use StartVal as the
// initial value of the addrec evolution.
  3334. if (StartVal == getMinusSCEV(AddRec->getOperand(0),
  3335. AddRec->getOperand(1))) {
  3336. // FIXME: For constant StartVal, we should be able to infer
  3337. // no-wrap flags.
  3338. const SCEV *PHISCEV =
  3339. getAddRecExpr(StartVal, AddRec->getOperand(1), L,
  3340. SCEV::FlagAnyWrap);
  3341. // Okay, for the entire analysis of this edge we assumed the PHI
  3342. // to be symbolic. We now need to go back and purge all of the
  3343. // entries for the scalars that use the symbolic expression.
  3344. ForgetSymbolicName(PN, SymbolicName);
  3345. ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
  3346. return PHISCEV;
  3347. }
  3348. }
  3349. }
  3350. }
  3351. }
  3352. // If the PHI has a single incoming value, follow that value, unless the
  3353. // PHI's incoming blocks are in a different loop, in which case doing so
  3354. // risks breaking LCSSA form. Instcombine would normally zap these, but
  3355. // it doesn't have DominatorTree information, so it may miss cases.
  3356. if (Value *V =
  3357. SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC))
  3358. if (LI->replacementPreservesLCSSAForm(PN, V))
  3359. return getSCEV(V);
  3360. // If it's not a loop phi, we can't handle it yet.
  3361. return getUnknown(PN);
  3362. }
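// A typical case handled above (assumed IR, for illustration):
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw nsw i64 %iv, 1
// Here the start value is 0, the backedge value is (%iv + 1), and the PHI is
// recognized as the affine addrec {0,+,1}<nuw><nsw> for the enclosing loop.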
  3363. /// createNodeForGEP - Expand GEP instructions into add and multiply
  3364. /// operations. This allows them to be analyzed by regular SCEV code.
  3365. ///
  3366. const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  3367. Value *Base = GEP->getOperand(0);
  3368. // Don't attempt to analyze GEPs over unsized objects.
  3369. if (!Base->getType()->getPointerElementType()->isSized())
  3370. return getUnknown(GEP);
  3371. SmallVector<const SCEV *, 4> IndexExprs;
  3372. for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
  3373. IndexExprs.push_back(getSCEV(*Index));
  3374. return getGEPExpr(GEP->getSourceElementType(), getSCEV(Base), IndexExprs,
  3375. GEP->isInBounds());
  3376. }
  3377. /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
  3378. /// guaranteed to end in (at every loop iteration). It is, at the same time,
  3379. /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
  3380. /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
  3381. uint32_t
  3382. ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  3383. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  3384. return C->getValue()->getValue().countTrailingZeros();
  3385. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
  3386. return std::min(GetMinTrailingZeros(T->getOperand()),
  3387. (uint32_t)getTypeSizeInBits(T->getType()));
  3388. if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
  3389. uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
  3390. return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
  3391. getTypeSizeInBits(E->getType()) : OpRes;
  3392. }
  3393. if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
  3394. uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
  3395. return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
  3396. getTypeSizeInBits(E->getType()) : OpRes;
  3397. }
  3398. if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
// The result is the min of all operands' results.
  3400. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
  3401. for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
  3402. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
  3403. return MinOpRes;
  3404. }
  3405. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
// The result is the sum of all operands' results.
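// For example, 4 * 8 = 32 has 2 + 3 = 5 trailing zero bits (clamped to the
// bit width of the expression).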
  3407. uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
  3408. uint32_t BitWidth = getTypeSizeInBits(M->getType());
  3409. for (unsigned i = 1, e = M->getNumOperands();
  3410. SumOpRes != BitWidth && i != e; ++i)
  3411. SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
  3412. BitWidth);
  3413. return SumOpRes;
  3414. }
  3415. if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
// The result is the min of all operands' results.
  3417. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
  3418. for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
  3419. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
  3420. return MinOpRes;
  3421. }
  3422. if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
// The result is the min of all operands' results.
  3424. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
  3425. for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
  3426. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
  3427. return MinOpRes;
  3428. }
  3429. if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
// The result is the min of all operands' results.
  3431. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
  3432. for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
  3433. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
  3434. return MinOpRes;
  3435. }
  3436. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  3437. // For a SCEVUnknown, ask ValueTracking.
  3438. unsigned BitWidth = getTypeSizeInBits(U->getType());
  3439. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
  3440. computeKnownBits(U->getValue(), Zeros, Ones,
  3441. F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
  3442. return Zeros.countTrailingOnes();
  3443. }
  3444. // SCEVUDivExpr
  3445. return 0;
  3446. }
  3447. /// GetRangeFromMetadata - Helper method to assign a range to V from
  3448. /// metadata present in the IR.
  3449. static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  3450. if (Instruction *I = dyn_cast<Instruction>(V)) {
  3451. if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) {
  3452. ConstantRange TotalRange(
  3453. cast<IntegerType>(I->getType())->getBitWidth(), false);
  3454. unsigned NumRanges = MD->getNumOperands() / 2;
  3455. assert(NumRanges >= 1);
  3456. for (unsigned i = 0; i < NumRanges; ++i) {
  3457. ConstantInt *Lower =
  3458. mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 0));
  3459. ConstantInt *Upper =
  3460. mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 1));
  3461. ConstantRange Range(Lower->getValue(), Upper->getValue());
  3462. TotalRange = TotalRange.unionWith(Range);
  3463. }
  3464. return TotalRange;
  3465. }
  3466. }
  3467. return None;
  3468. }
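// For example, !range metadata of the form !{i32 0, i32 10, i32 20, i32 30}
// describes [0, 10) and [20, 30); because a single ConstantRange cannot hold
// disjoint intervals, the union computed above is the enclosing range [0, 30).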
  3469. /// getRange - Determine the range for a particular SCEV. If SignHint is
  3470. /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
  3471. /// with a "cleaner" unsigned (resp. signed) representation.
  3472. ///
  3473. ConstantRange
  3474. ScalarEvolution::getRange(const SCEV *S,
  3475. ScalarEvolution::RangeSignHint SignHint) {
  3476. DenseMap<const SCEV *, ConstantRange> &Cache =
  3477. SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
  3478. : SignedRanges;
  3479. // See if we've computed this range already.
  3480. DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  3481. if (I != Cache.end())
  3482. return I->second;
  3483. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  3484. return setRange(C, SignHint, ConstantRange(C->getValue()->getValue()));
  3485. unsigned BitWidth = getTypeSizeInBits(S->getType());
  3486. ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  3487. // If the value has known zeros, the maximum value will have those known zeros
  3488. // as well.
  3489. uint32_t TZ = GetMinTrailingZeros(S);
  3490. if (TZ != 0) {
  3491. if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
  3492. ConservativeResult =
  3493. ConstantRange(APInt::getMinValue(BitWidth),
  3494. APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
  3495. else
  3496. ConservativeResult = ConstantRange(
  3497. APInt::getSignedMinValue(BitWidth),
  3498. APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  3499. }
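// For example, with an 8-bit expression known to have 2 trailing zero bits,
// the unsigned hint narrows the starting range to [0, 252]: only multiples of
// 4 are possible, and 252 is the largest 8-bit multiple of 4.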
  3500. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
  3501. ConstantRange X = getRange(Add->getOperand(0), SignHint);
  3502. for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
  3503. X = X.add(getRange(Add->getOperand(i), SignHint));
  3504. return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
  3505. }
  3506. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
  3507. ConstantRange X = getRange(Mul->getOperand(0), SignHint);
  3508. for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
  3509. X = X.multiply(getRange(Mul->getOperand(i), SignHint));
  3510. return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
  3511. }
  3512. if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
  3513. ConstantRange X = getRange(SMax->getOperand(0), SignHint);
  3514. for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
  3515. X = X.smax(getRange(SMax->getOperand(i), SignHint));
  3516. return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
  3517. }
  3518. if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
  3519. ConstantRange X = getRange(UMax->getOperand(0), SignHint);
  3520. for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
  3521. X = X.umax(getRange(UMax->getOperand(i), SignHint));
  3522. return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
  3523. }
  3524. if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
  3525. ConstantRange X = getRange(UDiv->getLHS(), SignHint);
  3526. ConstantRange Y = getRange(UDiv->getRHS(), SignHint);
  3527. return setRange(UDiv, SignHint,
  3528. ConservativeResult.intersectWith(X.udiv(Y)));
  3529. }
  3530. if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
  3531. ConstantRange X = getRange(ZExt->getOperand(), SignHint);
  3532. return setRange(ZExt, SignHint,
  3533. ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  3534. }
  3535. if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
  3536. ConstantRange X = getRange(SExt->getOperand(), SignHint);
  3537. return setRange(SExt, SignHint,
  3538. ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  3539. }
  3540. if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
  3541. ConstantRange X = getRange(Trunc->getOperand(), SignHint);
  3542. return setRange(Trunc, SignHint,
  3543. ConservativeResult.intersectWith(X.truncate(BitWidth)));
  3544. }
  3545. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
  3546. // If there's no unsigned wrap, the value will never be less than its
  3547. // initial value.
  3548. if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
  3549. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
  3550. if (!C->getValue()->isZero())
  3551. ConservativeResult =
  3552. ConservativeResult.intersectWith(
  3553. ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
  3554. // If there's no signed wrap, and all the operands have the same sign or
  3555. // zero, the value won't ever change sign.
  3556. if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
  3557. bool AllNonNeg = true;
  3558. bool AllNonPos = true;
  3559. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
  3560. if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
  3561. if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
  3562. }
  3563. if (AllNonNeg)
  3564. ConservativeResult = ConservativeResult.intersectWith(
  3565. ConstantRange(APInt(BitWidth, 0),
  3566. APInt::getSignedMinValue(BitWidth)));
  3567. else if (AllNonPos)
  3568. ConservativeResult = ConservativeResult.intersectWith(
  3569. ConstantRange(APInt::getSignedMinValue(BitWidth),
  3570. APInt(BitWidth, 1)));
  3571. }
  3572. // TODO: non-affine addrec
  3573. if (AddRec->isAffine()) {
  3574. Type *Ty = AddRec->getType();
  3575. const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
  3576. if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
  3577. getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
  3578. // Check for overflow. This must be done with ConstantRange arithmetic
  3579. // because we could be called from within the ScalarEvolution overflow
  3580. // checking code.
  3581. MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
  3582. ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
  3583. ConstantRange ZExtMaxBECountRange =
  3584. MaxBECountRange.zextOrTrunc(BitWidth * 2 + 1);
  3585. const SCEV *Start = AddRec->getStart();
  3586. const SCEV *Step = AddRec->getStepRecurrence(*this);
  3587. ConstantRange StepSRange = getSignedRange(Step);
  3588. ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2 + 1);
  3589. ConstantRange StartURange = getUnsignedRange(Start);
  3590. ConstantRange EndURange =
  3591. StartURange.add(MaxBECountRange.multiply(StepSRange));
  3592. // Check for unsigned overflow.
  3593. ConstantRange ZExtStartURange =
  3594. StartURange.zextOrTrunc(BitWidth * 2 + 1);
  3595. ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2 + 1);
  3596. if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) ==
  3597. ZExtEndURange) {
  3598. APInt Min = APIntOps::umin(StartURange.getUnsignedMin(),
  3599. EndURange.getUnsignedMin());
  3600. APInt Max = APIntOps::umax(StartURange.getUnsignedMax(),
  3601. EndURange.getUnsignedMax());
  3602. bool IsFullRange = Min.isMinValue() && Max.isMaxValue();
  3603. if (!IsFullRange)
  3604. ConservativeResult =
  3605. ConservativeResult.intersectWith(ConstantRange(Min, Max + 1));
  3606. }
  3607. ConstantRange StartSRange = getSignedRange(Start);
  3608. ConstantRange EndSRange =
  3609. StartSRange.add(MaxBECountRange.multiply(StepSRange));
  3610. // Check for signed overflow. This must be done with ConstantRange
  3611. // arithmetic because we could be called from within the ScalarEvolution
  3612. // overflow checking code.
  3613. ConstantRange SExtStartSRange =
  3614. StartSRange.sextOrTrunc(BitWidth * 2 + 1);
  3615. ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2 + 1);
  3616. if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) ==
  3617. SExtEndSRange) {
  3618. APInt Min = APIntOps::smin(StartSRange.getSignedMin(),
  3619. EndSRange.getSignedMin());
  3620. APInt Max = APIntOps::smax(StartSRange.getSignedMax(),
  3621. EndSRange.getSignedMax());
  3622. bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue();
  3623. if (!IsFullRange)
  3624. ConservativeResult =
  3625. ConservativeResult.intersectWith(ConstantRange(Min, Max + 1));
  3626. }
  3627. }
  3628. }
  3629. return setRange(AddRec, SignHint, ConservativeResult);
  3630. }
  3631. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  3632. // Check if the IR explicitly contains !range metadata.
  3633. Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
  3634. if (MDRange.hasValue())
  3635. ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());
  3636. // Split here to avoid paying the compile-time cost of calling both
  3637. // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
  3638. // if needed.
  3639. const DataLayout &DL = F->getParent()->getDataLayout();
  3640. if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
  3641. // For a SCEVUnknown, ask ValueTracking.
  3642. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
  3643. computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
  3644. if (Ones != ~Zeros + 1)
  3645. ConservativeResult =
  3646. ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
  3647. } else {
  3648. assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
  3649. "generalize as needed!");
  3650. unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
  3651. if (NS > 1)
  3652. ConservativeResult = ConservativeResult.intersectWith(
  3653. ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
  3654. APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
  3655. }
  3656. return setRange(U, SignHint, ConservativeResult);
  3657. }
  3658. return setRange(S, SignHint, ConservativeResult);
  3659. }
  3660. /// createSCEV - We know that there is no SCEV for the specified value.
  3661. /// Analyze the expression.
  3662. ///
  3663. const SCEV *ScalarEvolution::createSCEV(Value *V) {
  3664. if (!isSCEVable(V->getType()))
  3665. return getUnknown(V);
  3666. unsigned Opcode = Instruction::UserOp1;
  3667. if (Instruction *I = dyn_cast<Instruction>(V)) {
  3668. Opcode = I->getOpcode();
  3669. // Don't attempt to analyze instructions in blocks that aren't
  3670. // reachable. Such instructions don't matter, and they aren't required
  3671. // to obey basic rules for definitions dominating uses which this
  3672. // analysis depends on.
  3673. if (!DT->isReachableFromEntry(I->getParent()))
  3674. return getUnknown(V);
  3675. } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
  3676. Opcode = CE->getOpcode();
  3677. else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
  3678. return getConstant(CI);
  3679. else if (isa<ConstantPointerNull>(V))
  3680. return getConstant(V->getType(), 0);
  3681. else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
  3682. return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
  3683. else
  3684. return getUnknown(V);
  3685. Operator *U = cast<Operator>(V);
  3686. switch (Opcode) {
  3687. case Instruction::Add: {
  3688. // The simple thing to do would be to just call getSCEV on both operands
  3689. // and call getAddExpr with the result. However if we're looking at a
  3690. // bunch of things all added together, this can be quite inefficient,
  3691. // because it leads to N-1 getAddExpr calls for N ultimate operands.
  3692. // Instead, gather up all the operands and make a single getAddExpr call.
  3693. // LLVM IR canonical form means we need only traverse the left operands.
  3694. //
  3695. // Don't apply this instruction's NSW or NUW flags to the new
  3696. // expression. The instruction may be guarded by control flow that the
  3697. // no-wrap behavior depends on. Non-control-equivalent instructions can be
  3698. // mapped to the same SCEV expression, and it would be incorrect to transfer
  3699. // NSW/NUW semantics to those operations.
  3700. SmallVector<const SCEV *, 4> AddOps;
  3701. AddOps.push_back(getSCEV(U->getOperand(1)));
  3702. for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
  3703. unsigned Opcode = Op->getValueID() - Value::InstructionVal;
  3704. if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
  3705. break;
  3706. U = cast<Operator>(Op);
  3707. const SCEV *Op1 = getSCEV(U->getOperand(1));
  3708. if (Opcode == Instruction::Sub)
  3709. AddOps.push_back(getNegativeSCEV(Op1));
  3710. else
  3711. AddOps.push_back(Op1);
  3712. }
  3713. AddOps.push_back(getSCEV(U->getOperand(0)));
  3714. return getAddExpr(AddOps);
  3715. }
  3716. case Instruction::Mul: {
  3717. // Don't transfer NSW/NUW for the same reason as AddExpr.
  3718. SmallVector<const SCEV *, 4> MulOps;
  3719. MulOps.push_back(getSCEV(U->getOperand(1)));
  3720. for (Value *Op = U->getOperand(0);
  3721. Op->getValueID() == Instruction::Mul + Value::InstructionVal;
  3722. Op = U->getOperand(0)) {
  3723. U = cast<Operator>(Op);
  3724. MulOps.push_back(getSCEV(U->getOperand(1)));
  3725. }
  3726. MulOps.push_back(getSCEV(U->getOperand(0)));
  3727. return getMulExpr(MulOps);
  3728. }
  3729. case Instruction::UDiv:
  3730. return getUDivExpr(getSCEV(U->getOperand(0)),
  3731. getSCEV(U->getOperand(1)));
  3732. case Instruction::Sub:
  3733. return getMinusSCEV(getSCEV(U->getOperand(0)),
  3734. getSCEV(U->getOperand(1)));
  3735. case Instruction::And:
  3736. // For an expression like x&255 that merely masks off the high bits,
  3737. // use zext(trunc(x)) as the SCEV expression.
  3738. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3739. if (CI->isNullValue())
  3740. return getSCEV(U->getOperand(1));
  3741. if (CI->isAllOnesValue())
  3742. return getSCEV(U->getOperand(0));
  3743. const APInt &A = CI->getValue();
  3744. // Instcombine's ShrinkDemandedConstant may strip bits out of
  3745. // constants, obscuring what would otherwise be a low-bits mask.
  3746. // Use computeKnownBits to compute what ShrinkDemandedConstant
  3747. // knew about to reconstruct a low-bits mask value.
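// For instance, x & 0xF0 on i32 (LZ = 24, TZ = 4) is rewritten below as
// zext(trunc(x /u 16)) * 16, using a 4-bit intermediate type.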
  3748. unsigned LZ = A.countLeadingZeros();
  3749. unsigned TZ = A.countTrailingZeros();
  3750. unsigned BitWidth = A.getBitWidth();
  3751. APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  3752. computeKnownBits(U->getOperand(0), KnownZero, KnownOne,
  3753. F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
  3754. APInt EffectiveMask =
  3755. APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
  3756. if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
  3757. const SCEV *MulCount = getConstant(
  3758. ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ)));
  3759. return getMulExpr(
  3760. getZeroExtendExpr(
  3761. getTruncateExpr(
  3762. getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount),
  3763. IntegerType::get(getContext(), BitWidth - LZ - TZ)),
  3764. U->getType()),
  3765. MulCount);
  3766. }
  3767. }
  3768. break;
  3769. case Instruction::Or:
  3770. // If the RHS of the Or is a constant, we may have something like:
  3771. // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
  3772. // optimizations will transparently handle this case.
  3773. //
  3774. // In order for this transformation to be safe, the LHS must be of the
  3775. // form X*(2^n) and the Or constant must be less than 2^n.
  3776. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3777. const SCEV *LHS = getSCEV(U->getOperand(0));
  3778. const APInt &CIVal = CI->getValue();
  3779. if (GetMinTrailingZeros(LHS) >=
  3780. (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
  3781. // Build a plain add SCEV.
  3782. const SCEV *S = getAddExpr(LHS, getSCEV(CI));
  3783. // If the LHS of the add was an addrec and it has no-wrap flags,
  3784. // transfer the no-wrap flags, since an or won't introduce a wrap.
  3785. if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
  3786. const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
  3787. const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
  3788. OldAR->getNoWrapFlags());
  3789. }
  3790. return S;
  3791. }
  3792. }
  3793. break;
  3794. case Instruction::Xor:
  3795. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3796. // If the RHS of the xor is a signbit, then this is just an add.
  3797. // Instcombine turns add of signbit into xor as a strength reduction step.
  3798. if (CI->getValue().isSignBit())
  3799. return getAddExpr(getSCEV(U->getOperand(0)),
  3800. getSCEV(U->getOperand(1)));
  3801. // If the RHS of xor is -1, then this is a not operation.
  3802. if (CI->isAllOnesValue())
  3803. return getNotSCEV(getSCEV(U->getOperand(0)));
  3804. // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
  3805. // This is a variant of the check for xor with -1, and it handles
  3806. // the case where instcombine has trimmed non-demanded bits out
  3807. // of an xor with -1.
  3808. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
  3809. if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
  3810. if (BO->getOpcode() == Instruction::And &&
  3811. LCI->getValue() == CI->getValue())
  3812. if (const SCEVZeroExtendExpr *Z =
  3813. dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
  3814. Type *UTy = U->getType();
  3815. const SCEV *Z0 = Z->getOperand();
  3816. Type *Z0Ty = Z0->getType();
  3817. unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
  3818. // If C is a low-bits mask, the zero extend is serving to
  3819. // mask off the high bits. Complement the operand and
  3820. // re-apply the zext.
  3821. if (APIntOps::isMask(Z0TySize, CI->getValue()))
  3822. return getZeroExtendExpr(getNotSCEV(Z0), UTy);
  3823. // If C is a single bit, it may be in the sign-bit position
  3824. // before the zero-extend. In this case, represent the xor
  3825. // using an add, which is equivalent, and re-apply the zext.
  3826. APInt Trunc = CI->getValue().trunc(Z0TySize);
  3827. if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
  3828. Trunc.isSignBit())
  3829. return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
  3830. UTy);
  3831. }
  3832. }
  3833. break;
  3834. case Instruction::Shl:
  3835. // Turn shift left of a constant amount into a multiply.
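// For example, X << 3 is modeled as X * 8.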
  3836. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3837. uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
  3838. // If the shift count is not less than the bitwidth, the result of
  3839. // the shift is undefined. Don't try to analyze it, because the
  3840. // resolution chosen here may differ from the resolution chosen in
  3841. // other parts of the compiler.
  3842. if (SA->getValue().uge(BitWidth))
  3843. break;
  3844. Constant *X = ConstantInt::get(getContext(),
  3845. APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
  3846. return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
  3847. }
  3848. break;
  3849. case Instruction::LShr:
// Turn logical shift right of a constant into an unsigned divide.
  3851. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3852. uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
  3853. // If the shift count is not less than the bitwidth, the result of
  3854. // the shift is undefined. Don't try to analyze it, because the
  3855. // resolution chosen here may differ from the resolution chosen in
  3856. // other parts of the compiler.
  3857. if (SA->getValue().uge(BitWidth))
  3858. break;
  3859. Constant *X = ConstantInt::get(getContext(),
  3860. APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
  3861. return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
  3862. }
  3863. break;
  3864. case Instruction::AShr:
  3865. // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
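// For example, on i32, (X << 24) >>s 24 becomes sext(trunc X to i8) to i32.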
  3866. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
  3867. if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
  3868. if (L->getOpcode() == Instruction::Shl &&
  3869. L->getOperand(1) == U->getOperand(1)) {
  3870. uint64_t BitWidth = getTypeSizeInBits(U->getType());
  3871. // If the shift count is not less than the bitwidth, the result of
  3872. // the shift is undefined. Don't try to analyze it, because the
  3873. // resolution chosen here may differ from the resolution chosen in
  3874. // other parts of the compiler.
  3875. if (CI->getValue().uge(BitWidth))
  3876. break;
  3877. uint64_t Amt = BitWidth - CI->getZExtValue();
  3878. if (Amt == BitWidth)
  3879. return getSCEV(L->getOperand(0)); // shift by zero --> noop
  3880. return
  3881. getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
  3882. IntegerType::get(getContext(),
  3883. Amt)),
  3884. U->getType());
  3885. }
  3886. break;
  3887. case Instruction::Trunc:
  3888. return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
  3889. case Instruction::ZExt:
  3890. return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  3891. case Instruction::SExt:
  3892. return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  3893. case Instruction::BitCast:
  3894. // BitCasts are no-op casts so we just eliminate the cast.
  3895. if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
  3896. return getSCEV(U->getOperand(0));
  3897. break;
// It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
  3899. // lead to pointer expressions which cannot safely be expanded to GEPs,
  3900. // because ScalarEvolution doesn't respect the GEP aliasing rules when
  3901. // simplifying integer expressions.
  3902. case Instruction::GetElementPtr:
  3903. return createNodeForGEP(cast<GEPOperator>(U));
  3904. case Instruction::PHI:
  3905. return createNodeForPHI(cast<PHINode>(U));
  3906. case Instruction::Select:
  3907. // This could be a smax or umax that was lowered earlier.
  3908. // Try to recover it.
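// For example, select (icmp sgt %a, %b), %a, %b is recovered as smax(%a, %b).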
  3909. if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
  3910. Value *LHS = ICI->getOperand(0);
  3911. Value *RHS = ICI->getOperand(1);
  3912. switch (ICI->getPredicate()) {
  3913. case ICmpInst::ICMP_SLT:
  3914. case ICmpInst::ICMP_SLE:
  3915. std::swap(LHS, RHS);
  3916. // fall through
  3917. case ICmpInst::ICMP_SGT:
  3918. case ICmpInst::ICMP_SGE:
  3919. // a >s b ? a+x : b+x -> smax(a, b)+x
  3920. // a >s b ? b+x : a+x -> smin(a, b)+x
  3921. if (getTypeSizeInBits(LHS->getType()) <=
  3922. getTypeSizeInBits(U->getType())) {
  3923. const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), U->getType());
  3924. const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), U->getType());
  3925. const SCEV *LA = getSCEV(U->getOperand(1));
  3926. const SCEV *RA = getSCEV(U->getOperand(2));
  3927. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3928. const SCEV *RDiff = getMinusSCEV(RA, RS);
  3929. if (LDiff == RDiff)
  3930. return getAddExpr(getSMaxExpr(LS, RS), LDiff);
  3931. LDiff = getMinusSCEV(LA, RS);
  3932. RDiff = getMinusSCEV(RA, LS);
  3933. if (LDiff == RDiff)
  3934. return getAddExpr(getSMinExpr(LS, RS), LDiff);
  3935. }
  3936. break;
  3937. case ICmpInst::ICMP_ULT:
  3938. case ICmpInst::ICMP_ULE:
  3939. std::swap(LHS, RHS);
  3940. // fall through
  3941. case ICmpInst::ICMP_UGT:
  3942. case ICmpInst::ICMP_UGE:
  3943. // a >u b ? a+x : b+x -> umax(a, b)+x
  3944. // a >u b ? b+x : a+x -> umin(a, b)+x
  3945. if (getTypeSizeInBits(LHS->getType()) <=
  3946. getTypeSizeInBits(U->getType())) {
  3947. const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
  3948. const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), U->getType());
  3949. const SCEV *LA = getSCEV(U->getOperand(1));
  3950. const SCEV *RA = getSCEV(U->getOperand(2));
  3951. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3952. const SCEV *RDiff = getMinusSCEV(RA, RS);
  3953. if (LDiff == RDiff)
  3954. return getAddExpr(getUMaxExpr(LS, RS), LDiff);
  3955. LDiff = getMinusSCEV(LA, RS);
  3956. RDiff = getMinusSCEV(RA, LS);
  3957. if (LDiff == RDiff)
  3958. return getAddExpr(getUMinExpr(LS, RS), LDiff);
  3959. }
  3960. break;
  3961. case ICmpInst::ICMP_NE:
  3962. // n != 0 ? n+x : 1+x -> umax(n, 1)+x
  3963. if (getTypeSizeInBits(LHS->getType()) <=
  3964. getTypeSizeInBits(U->getType()) &&
  3965. isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
  3966. const SCEV *One = getConstant(U->getType(), 1);
  3967. const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
  3968. const SCEV *LA = getSCEV(U->getOperand(1));
  3969. const SCEV *RA = getSCEV(U->getOperand(2));
  3970. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3971. const SCEV *RDiff = getMinusSCEV(RA, One);
  3972. if (LDiff == RDiff)
  3973. return getAddExpr(getUMaxExpr(One, LS), LDiff);
  3974. }
  3975. break;
  3976. case ICmpInst::ICMP_EQ:
  3977. // n == 0 ? 1+x : n+x -> umax(n, 1)+x
  3978. if (getTypeSizeInBits(LHS->getType()) <=
  3979. getTypeSizeInBits(U->getType()) &&
  3980. isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
  3981. const SCEV *One = getConstant(U->getType(), 1);
  3982. const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
  3983. const SCEV *LA = getSCEV(U->getOperand(1));
  3984. const SCEV *RA = getSCEV(U->getOperand(2));
  3985. const SCEV *LDiff = getMinusSCEV(LA, One);
  3986. const SCEV *RDiff = getMinusSCEV(RA, LS);
  3987. if (LDiff == RDiff)
  3988. return getAddExpr(getUMaxExpr(One, LS), LDiff);
  3989. }
  3990. break;
  3991. default:
  3992. break;
  3993. }
  3994. }
  3995. default: // We cannot analyze this expression.
  3996. break;
  3997. }
  3998. return getUnknown(V);
  3999. }
  4000. //===----------------------------------------------------------------------===//
  4001. // Iteration Count Computation Code
  4002. //
  4003. unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) {
  4004. if (BasicBlock *ExitingBB = L->getExitingBlock())
  4005. return getSmallConstantTripCount(L, ExitingBB);
  4006. // No trip count information for multiple exits.
  4007. return 0;
  4008. }
  4009. /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
  4010. /// normal unsigned value. Returns 0 if the trip count is unknown or not
  4011. /// constant. Will also return 0 if the maximum trip count is very large (>=
  4012. /// 2^32).
  4013. ///
  4014. /// This "trip count" assumes that control exits via ExitingBlock. More
  4015. /// precisely, it is the number of times that control may reach ExitingBlock
  4016. /// before taking the branch. For loops with multiple exits, it may not be the
  4017. /// number times that the loop header executes because the loop may exit
  4018. /// prematurely via another branch.
  4019. unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
  4020. BasicBlock *ExitingBlock) {
  4021. assert(ExitingBlock && "Must pass a non-null exiting block!");
  4022. assert(L->isLoopExiting(ExitingBlock) &&
  4023. "Exiting block must actually branch out of the loop!");
  4024. const SCEVConstant *ExitCount =
  4025. dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  4026. if (!ExitCount)
  4027. return 0;
  4028. ConstantInt *ExitConst = ExitCount->getValue();
  4029. // Guard against huge trip counts.
  4030. if (ExitConst->getValue().getActiveBits() > 32)
  4031. return 0;
  4032. // In case of integer overflow, this returns 0, which is correct.
  4033. return ((unsigned)ExitConst->getZExtValue()) + 1;
  4034. }
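// For example, a constant backedge-taken count of 99 for ExitingBlock yields a
// small constant trip count of 100.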
  4035. unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) {
  4036. if (BasicBlock *ExitingBB = L->getExitingBlock())
  4037. return getSmallConstantTripMultiple(L, ExitingBB);
  4038. // No trip multiple information for multiple exits.
  4039. return 0;
  4040. }
  4041. /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
  4042. /// trip count of this loop as a normal unsigned value, if possible. This
  4043. /// means that the actual trip count is always a multiple of the returned
  4044. /// value (don't forget the trip count could very well be zero as well!).
  4045. ///
/// Returns 1 if the trip count is unknown or not guaranteed to be a
/// multiple of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also return 1
  4049. /// if the trip count is very large (>= 2^32).
  4050. ///
  4051. /// As explained in the comments for getSmallConstantTripCount, this assumes
  4052. /// that control exits the loop via ExitingBlock.
  4053. unsigned
  4054. ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
  4055. BasicBlock *ExitingBlock) {
  4056. assert(ExitingBlock && "Must pass a non-null exiting block!");
  4057. assert(L->isLoopExiting(ExitingBlock) &&
  4058. "Exiting block must actually branch out of the loop!");
  4059. const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  4060. if (ExitCount == getCouldNotCompute())
  4061. return 1;
  4062. // Get the trip count from the BE count by adding 1.
  4063. const SCEV *TCMul = getAddExpr(ExitCount,
  4064. getConstant(ExitCount->getType(), 1));
  4065. // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
  4066. // to factor simple cases.
  4067. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
  4068. TCMul = Mul->getOperand(0);
  4069. const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
  4070. if (!MulC)
  4071. return 1;
  4072. ConstantInt *Result = MulC->getValue();
  4073. // Guard against huge trip counts (this requires checking
  4074. // for zero to handle the case where the trip count == -1 and the
  4075. // addition wraps).
  4076. if (!Result || Result->getValue().getActiveBits() > 32 ||
  4077. Result->getValue().getActiveBits() == 0)
  4078. return 1;
  4079. return (unsigned)Result->getZExtValue();
  4080. }
  4081. // getExitCount - Get the expression for the number of loop iterations for which
  4082. // this loop is guaranteed not to exit via ExitingBlock. Otherwise return
  4083. // SCEVCouldNotCompute.
  4084. const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
  4085. return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  4086. }
  4087. /// getBackedgeTakenCount - If the specified loop has a predictable
  4088. /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
  4089. /// object. The backedge-taken count is the number of times the loop header
  4090. /// will be branched to from within the loop. This is one less than the
  4091. /// trip count of the loop, since it doesn't count the first iteration,
  4092. /// when the header is branched to from outside the loop.
  4093. ///
  4094. /// Note that it is not valid to call this method on a loop without a
  4095. /// loop-invariant backedge-taken count (see
  4096. /// hasLoopInvariantBackedgeTakenCount).
  4097. ///
  4098. const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  4099. return getBackedgeTakenInfo(L).getExact(this);
  4100. }
  4101. /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
  4102. /// return the least SCEV value that is known never to be less than the
  4103. /// actual backedge taken count.
  4104. const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  4105. return getBackedgeTakenInfo(L).getMax(this);
  4106. }
  4107. /// PushLoopPHIs - Push PHI nodes in the header of the given loop
  4108. /// onto the given Worklist.
  4109. static void
  4110. PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  4111. BasicBlock *Header = L->getHeader();
  4112. // Push all Loop-header PHIs onto the Worklist stack.
  4113. for (BasicBlock::iterator I = Header->begin();
  4114. PHINode *PN = dyn_cast<PHINode>(I); ++I)
  4115. Worklist.push_back(PN);
  4116. }
  4117. const ScalarEvolution::BackedgeTakenInfo &
  4118. ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  4119. // Initially insert an invalid entry for this loop. If the insertion
  4120. // succeeds, proceed to actually compute a backedge-taken count and
  4121. // update the value. The temporary CouldNotCompute value tells SCEV
  4122. // code elsewhere that it shouldn't attempt to request a new
  4123. // backedge-taken count, which could result in infinite recursion.
  4124. std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
  4125. BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
  4126. if (!Pair.second)
  4127. return Pair.first->second;
  4128. // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
  4129. // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  4130. // must be cleared in this scope.
  4131. BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
  4132. if (Result.getExact(this) != getCouldNotCompute()) {
  4133. assert(isLoopInvariant(Result.getExact(this), L) &&
  4134. isLoopInvariant(Result.getMax(this), L) &&
  4135. "Computed backedge-taken count isn't loop invariant for loop!");
  4136. ++NumTripCountsComputed;
  4137. }
  4138. else if (Result.getMax(this) == getCouldNotCompute() &&
  4139. isa<PHINode>(L->getHeader()->begin())) {
  4140. // Only count loops that have phi nodes as not being computable.
  4141. ++NumTripCountsNotComputed;
  4142. }
  4143. // Now that we know more about the trip count for this loop, forget any
  4144. // existing SCEV values for PHI nodes in this loop since they are only
  4145. // conservative estimates made without the benefit of trip count
  4146. // information. This is similar to the code in forgetLoop, except that
  4147. // it handles SCEVUnknown PHI nodes specially.
  4148. if (Result.hasAnyInfo()) {
  4149. SmallVector<Instruction *, 16> Worklist;
  4150. PushLoopPHIs(L, Worklist);
  4151. SmallPtrSet<Instruction *, 8> Visited;
  4152. while (!Worklist.empty()) {
  4153. Instruction *I = Worklist.pop_back_val();
  4154. if (!Visited.insert(I).second)
  4155. continue;
  4156. ValueExprMapType::iterator It =
  4157. ValueExprMap.find_as(static_cast<Value *>(I));
  4158. if (It != ValueExprMap.end()) {
  4159. const SCEV *Old = It->second;
  4160. // SCEVUnknown for a PHI either means that it has an unrecognized
4161. // structure, or it's a PHI that's in the process of being computed
4162. // by createNodeForPHI. In the former case, additional loop trip
4163. // count information isn't going to change anything. In the latter
  4164. // case, createNodeForPHI will perform the necessary updates on its
  4165. // own when it gets to that point.
  4166. if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
  4167. forgetMemoizedResults(Old);
  4168. ValueExprMap.erase(It);
  4169. }
  4170. if (PHINode *PN = dyn_cast<PHINode>(I))
  4171. ConstantEvolutionLoopExitValue.erase(PN);
  4172. }
  4173. PushDefUseChildren(I, Worklist);
  4174. }
  4175. }
  4176. // Re-lookup the insert position, since the call to
  4177. // ComputeBackedgeTakenCount above could result in a
4178. // recursive call to getBackedgeTakenInfo (on a different
  4179. // loop), which would invalidate the iterator computed
  4180. // earlier.
  4181. return BackedgeTakenCounts.find(L)->second = Result;
  4182. }
  4183. /// forgetLoop - This method should be called by the client when it has
4184. /// changed a loop in a way that may affect ScalarEvolution's ability to
  4185. /// compute a trip count, or if the loop is deleted.
  4186. void ScalarEvolution::forgetLoop(const Loop *L) {
  4187. // Drop any stored trip count value.
  4188. DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
  4189. BackedgeTakenCounts.find(L);
  4190. if (BTCPos != BackedgeTakenCounts.end()) {
  4191. BTCPos->second.clear();
  4192. BackedgeTakenCounts.erase(BTCPos);
  4193. }
  4194. // Drop information about expressions based on loop-header PHIs.
  4195. SmallVector<Instruction *, 16> Worklist;
  4196. PushLoopPHIs(L, Worklist);
  4197. SmallPtrSet<Instruction *, 8> Visited;
  4198. while (!Worklist.empty()) {
  4199. Instruction *I = Worklist.pop_back_val();
  4200. if (!Visited.insert(I).second)
  4201. continue;
  4202. ValueExprMapType::iterator It =
  4203. ValueExprMap.find_as(static_cast<Value *>(I));
  4204. if (It != ValueExprMap.end()) {
  4205. forgetMemoizedResults(It->second);
  4206. ValueExprMap.erase(It);
  4207. if (PHINode *PN = dyn_cast<PHINode>(I))
  4208. ConstantEvolutionLoopExitValue.erase(PN);
  4209. }
  4210. PushDefUseChildren(I, Worklist);
  4211. }
  4212. // Forget all contained loops too, to avoid dangling entries in the
  4213. // ValuesAtScopes map.
  4214. for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
  4215. forgetLoop(*I);
  4216. }
  4217. /// forgetValue - This method should be called by the client when it has
4218. /// changed a value in a way that may affect its value, or which may
  4219. /// disconnect it from a def-use chain linking it to a loop.
  4220. void ScalarEvolution::forgetValue(Value *V) {
  4221. Instruction *I = dyn_cast<Instruction>(V);
  4222. if (!I) return;
  4223. // Drop information about expressions based on loop-header PHIs.
  4224. SmallVector<Instruction *, 16> Worklist;
  4225. Worklist.push_back(I);
  4226. SmallPtrSet<Instruction *, 8> Visited;
  4227. while (!Worklist.empty()) {
  4228. I = Worklist.pop_back_val();
  4229. if (!Visited.insert(I).second)
  4230. continue;
  4231. ValueExprMapType::iterator It =
  4232. ValueExprMap.find_as(static_cast<Value *>(I));
  4233. if (It != ValueExprMap.end()) {
  4234. forgetMemoizedResults(It->second);
  4235. ValueExprMap.erase(It);
  4236. if (PHINode *PN = dyn_cast<PHINode>(I))
  4237. ConstantEvolutionLoopExitValue.erase(PN);
  4238. }
  4239. PushDefUseChildren(I, Worklist);
  4240. }
  4241. }
  4242. /// getExact - Get the exact loop backedge taken count considering all loop
4243. /// exits. A computable result can only be returned for loops with a single exit.
4244. /// Returning the minimum taken count among all exits is incorrect because one
4245. /// of the loop's exit limits may have been skipped. HowFarToZero assumes that
  4246. /// the limit of each loop test is never skipped. This is a valid assumption as
  4247. /// long as the loop exits via that test. For precise results, it is the
  4248. /// caller's responsibility to specify the relevant loop exit using
  4249. /// getExact(ExitingBlock, SE).
  4250. const SCEV *
  4251. ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
  4252. // If any exits were not computable, the loop is not computable.
  4253. if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
  4254. // We need exactly one computable exit.
  4255. if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
  4256. assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
  4257. const SCEV *BECount = nullptr;
  4258. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  4259. ENT != nullptr; ENT = ENT->getNextExit()) {
  4260. assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
  4261. if (!BECount)
  4262. BECount = ENT->ExactNotTaken;
  4263. else if (BECount != ENT->ExactNotTaken)
  4264. return SE->getCouldNotCompute();
  4265. }
  4266. assert(BECount && "Invalid not taken count for loop exit");
  4267. return BECount;
  4268. }
  4269. /// getExact - Get the exact not taken count for this loop exit.
  4270. const SCEV *
  4271. ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
  4272. ScalarEvolution *SE) const {
  4273. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  4274. ENT != nullptr; ENT = ENT->getNextExit()) {
  4275. if (ENT->ExitingBlock == ExitingBlock)
  4276. return ENT->ExactNotTaken;
  4277. }
  4278. return SE->getCouldNotCompute();
  4279. }
  4280. /// getMax - Get the max backedge taken count for the loop.
  4281. const SCEV *
  4282. ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  4283. return Max ? Max : SE->getCouldNotCompute();
  4284. }
  4285. bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
  4286. ScalarEvolution *SE) const {
  4287. if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
  4288. return true;
  4289. if (!ExitNotTaken.ExitingBlock)
  4290. return false;
  4291. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  4292. ENT != nullptr; ENT = ENT->getNextExit()) {
  4293. if (ENT->ExactNotTaken != SE->getCouldNotCompute()
  4294. && SE->hasOperand(ENT->ExactNotTaken, S)) {
  4295. return true;
  4296. }
  4297. }
  4298. return false;
  4299. }
  4300. /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
  4301. /// computable exit into a persistent ExitNotTakenInfo array.
  4302. ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
  4303. SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
  4304. bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
  4305. if (!Complete)
  4306. ExitNotTaken.setIncomplete();
  4307. unsigned NumExits = ExitCounts.size();
  4308. if (NumExits == 0) return;
  4309. ExitNotTaken.ExitingBlock = ExitCounts[0].first;
  4310. ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
  4311. if (NumExits == 1) return;
  4312. // Handle the rare case of multiple computable exits.
  4313. ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
  4314. ExitNotTakenInfo *PrevENT = &ExitNotTaken;
  4315. for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
  4316. PrevENT->setNextExit(ENT);
  4317. ENT->ExitingBlock = ExitCounts[i].first;
  4318. ENT->ExactNotTaken = ExitCounts[i].second;
  4319. }
  4320. }
  4321. /// clear - Invalidate this result and free the ExitNotTakenInfo array.
  4322. void ScalarEvolution::BackedgeTakenInfo::clear() {
  4323. ExitNotTaken.ExitingBlock = nullptr;
  4324. ExitNotTaken.ExactNotTaken = nullptr;
  4325. delete[] ExitNotTaken.getNextExit();
  4326. }
  4327. /// ComputeBackedgeTakenCount - Compute the number of times the backedge
  4328. /// of the specified loop will execute.
  4329. ScalarEvolution::BackedgeTakenInfo
  4330. ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
  4331. SmallVector<BasicBlock *, 8> ExitingBlocks;
  4332. L->getExitingBlocks(ExitingBlocks);
  4333. SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
  4334. bool CouldComputeBECount = true;
  4335. BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  4336. const SCEV *MustExitMaxBECount = nullptr;
  4337. const SCEV *MayExitMaxBECount = nullptr;
  4338. // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  4339. // and compute maxBECount.
  4340. for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
  4341. BasicBlock *ExitBB = ExitingBlocks[i];
  4342. ExitLimit EL = ComputeExitLimit(L, ExitBB);
  4343. // 1. For each exit that can be computed, add an entry to ExitCounts.
  4344. // CouldComputeBECount is true only if all exits can be computed.
  4345. if (EL.Exact == getCouldNotCompute())
  4346. // We couldn't compute an exact value for this exit, so
  4347. // we won't be able to compute an exact value for the loop.
  4348. CouldComputeBECount = false;
  4349. else
  4350. ExitCounts.push_back(std::make_pair(ExitBB, EL.Exact));
  4351. // 2. Derive the loop's MaxBECount from each exit's max number of
  4352. // non-exiting iterations. Partition the loop exits into two kinds:
  4353. // LoopMustExits and LoopMayExits.
  4354. //
4355. // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
  4356. // is a LoopMayExit. If any computable LoopMustExit is found, then
  4357. // MaxBECount is the minimum EL.Max of computable LoopMustExits. Otherwise,
  4358. // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is
  4359. // considered greater than any computable EL.Max.
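// For example, in a loop whose latch block ends in the test "i != n" but whose
// body also contains a conditional break, the latch test is a LoopMustExit
// (an exiting block trivially dominates itself), so its EL.Max alone bounds
// MaxBECount; the break is only a LoopMayExit and cannot weaken that bound.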
  4360. if (EL.Max != getCouldNotCompute() && Latch &&
  4361. DT->dominates(ExitBB, Latch)) {
  4362. if (!MustExitMaxBECount)
  4363. MustExitMaxBECount = EL.Max;
  4364. else {
  4365. MustExitMaxBECount =
  4366. getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max);
  4367. }
  4368. } else if (MayExitMaxBECount != getCouldNotCompute()) {
  4369. if (!MayExitMaxBECount || EL.Max == getCouldNotCompute())
  4370. MayExitMaxBECount = EL.Max;
  4371. else {
  4372. MayExitMaxBECount =
  4373. getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max);
  4374. }
  4375. }
  4376. }
  4377. const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
  4378. (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  4379. return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
  4380. }
  4381. /// ComputeExitLimit - Compute the number of times the backedge of the specified
  4382. /// loop will execute if it exits via the specified block.
  4383. ScalarEvolution::ExitLimit
  4384. ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
  4385. // Okay, we've chosen an exiting block. See what condition causes us to
  4386. // exit at this block and remember the exit block and whether all other targets
  4387. // lead to the loop header.
  4388. bool MustExecuteLoopHeader = true;
  4389. BasicBlock *Exit = nullptr;
  4390. for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
  4391. SI != SE; ++SI)
  4392. if (!L->contains(*SI)) {
  4393. if (Exit) // Multiple exit successors.
  4394. return getCouldNotCompute();
  4395. Exit = *SI;
  4396. } else if (*SI != L->getHeader()) {
  4397. MustExecuteLoopHeader = false;
  4398. }
  4399. // At this point, we know we have a conditional branch that determines whether
  4400. // the loop is exited. However, we don't know if the branch is executed each
  4401. // time through the loop. If not, then the execution count of the branch will
  4402. // not be equal to the trip count of the loop.
  4403. //
  4404. // Currently we check for this by checking to see if the Exit branch goes to
  4405. // the loop header. If so, we know it will always execute the same number of
  4406. // times as the loop. We also handle the case where the exit block *is* the
  4407. // loop header. This is common for un-rotated loops.
  4408. //
  4409. // If both of those tests fail, walk up the unique predecessor chain to the
  4410. // header, stopping if there is an edge that doesn't exit the loop. If the
  4411. // header is reached, the execution count of the branch will be equal to the
  4412. // trip count of the loop.
  4413. //
  4414. // More extensive analysis could be done to handle more cases here.
  4415. //
  4416. if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) {
  4417. // The simple checks failed, try climbing the unique predecessor chain
  4418. // up to the header.
  4419. bool Ok = false;
  4420. for (BasicBlock *BB = ExitingBlock; BB; ) {
  4421. BasicBlock *Pred = BB->getUniquePredecessor();
  4422. if (!Pred)
  4423. return getCouldNotCompute();
  4424. TerminatorInst *PredTerm = Pred->getTerminator();
  4425. for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
  4426. BasicBlock *PredSucc = PredTerm->getSuccessor(i);
  4427. if (PredSucc == BB)
  4428. continue;
  4429. // If the predecessor has a successor that isn't BB and isn't
  4430. // outside the loop, assume the worst.
  4431. if (L->contains(PredSucc))
  4432. return getCouldNotCompute();
  4433. }
  4434. if (Pred == L->getHeader()) {
  4435. Ok = true;
  4436. break;
  4437. }
  4438. BB = Pred;
  4439. }
  4440. if (!Ok)
  4441. return getCouldNotCompute();
  4442. }
  4443. bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  4444. TerminatorInst *Term = ExitingBlock->getTerminator();
  4445. if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
  4446. assert(BI->isConditional() && "If unconditional, it can't be in loop!");
  4447. // Proceed to the next level to examine the exit condition expression.
  4448. return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0),
  4449. BI->getSuccessor(1),
  4450. /*ControlsExit=*/IsOnlyExit);
  4451. }
  4452. if (SwitchInst *SI = dyn_cast<SwitchInst>(Term))
  4453. return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit,
  4454. /*ControlsExit=*/IsOnlyExit);
  4455. return getCouldNotCompute();
  4456. }
  4457. /// ComputeExitLimitFromCond - Compute the number of times the
  4458. /// backedge of the specified loop will execute if its exit condition
  4459. /// were a conditional branch of ExitCond, TBB, and FBB.
  4460. ///
  4461. /// @param ControlsExit is true if ExitCond directly controls the exit
  4462. /// branch. In this case, we can assume that the loop exits only if the
  4463. /// condition is true and can infer that failing to meet the condition prior to
  4464. /// integer wraparound results in undefined behavior.
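/// For example, when the exit branch is controlled by "i != n && j != m" and
/// the true edge stays in the loop, the loop leaves as soon as either
/// comparison fails, so the combined exact count is the unsigned minimum of
/// the counts computed for the two subconditions (when both are computable).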
  4465. ScalarEvolution::ExitLimit
  4466. ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
  4467. Value *ExitCond,
  4468. BasicBlock *TBB,
  4469. BasicBlock *FBB,
  4470. bool ControlsExit) {
  4471. // Check if the controlling expression for this loop is an And or Or.
  4472. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
  4473. if (BO->getOpcode() == Instruction::And) {
  4474. // Recurse on the operands of the and.
  4475. bool EitherMayExit = L->contains(TBB);
  4476. ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
  4477. ControlsExit && !EitherMayExit);
  4478. ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
  4479. ControlsExit && !EitherMayExit);
  4480. const SCEV *BECount = getCouldNotCompute();
  4481. const SCEV *MaxBECount = getCouldNotCompute();
  4482. if (EitherMayExit) {
  4483. // Both conditions must be true for the loop to continue executing.
  4484. // Choose the less conservative count.
  4485. if (EL0.Exact == getCouldNotCompute() ||
  4486. EL1.Exact == getCouldNotCompute())
  4487. BECount = getCouldNotCompute();
  4488. else
  4489. BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
  4490. if (EL0.Max == getCouldNotCompute())
  4491. MaxBECount = EL1.Max;
  4492. else if (EL1.Max == getCouldNotCompute())
  4493. MaxBECount = EL0.Max;
  4494. else
  4495. MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
  4496. } else {
  4497. // Both conditions must be true at the same time for the loop to exit.
  4498. // For now, be conservative.
  4499. assert(L->contains(FBB) && "Loop block has no successor in loop!");
  4500. if (EL0.Max == EL1.Max)
  4501. MaxBECount = EL0.Max;
  4502. if (EL0.Exact == EL1.Exact)
  4503. BECount = EL0.Exact;
  4504. }
  4505. return ExitLimit(BECount, MaxBECount);
  4506. }
  4507. if (BO->getOpcode() == Instruction::Or) {
  4508. // Recurse on the operands of the or.
  4509. bool EitherMayExit = L->contains(FBB);
  4510. ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
  4511. ControlsExit && !EitherMayExit);
  4512. ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
  4513. ControlsExit && !EitherMayExit);
  4514. const SCEV *BECount = getCouldNotCompute();
  4515. const SCEV *MaxBECount = getCouldNotCompute();
  4516. if (EitherMayExit) {
  4517. // Both conditions must be false for the loop to continue executing.
  4518. // Choose the less conservative count.
  4519. if (EL0.Exact == getCouldNotCompute() ||
  4520. EL1.Exact == getCouldNotCompute())
  4521. BECount = getCouldNotCompute();
  4522. else
  4523. BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
  4524. if (EL0.Max == getCouldNotCompute())
  4525. MaxBECount = EL1.Max;
  4526. else if (EL1.Max == getCouldNotCompute())
  4527. MaxBECount = EL0.Max;
  4528. else
  4529. MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
  4530. } else {
  4531. // Both conditions must be false at the same time for the loop to exit.
  4532. // For now, be conservative.
  4533. assert(L->contains(TBB) && "Loop block has no successor in loop!");
  4534. if (EL0.Max == EL1.Max)
  4535. MaxBECount = EL0.Max;
  4536. if (EL0.Exact == EL1.Exact)
  4537. BECount = EL0.Exact;
  4538. }
  4539. return ExitLimit(BECount, MaxBECount);
  4540. }
  4541. }
  4542. // With an icmp, it may be feasible to compute an exact backedge-taken count.
  4543. // Proceed to the next level to examine the icmp.
  4544. if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
  4545. return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit);
  4546. // Check for a constant condition. These are normally stripped out by
  4547. // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  4548. // preserve the CFG and is temporarily leaving constant conditions
  4549. // in place.
  4550. if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
  4551. if (L->contains(FBB) == !CI->getZExtValue())
  4552. // The backedge is always taken.
  4553. return getCouldNotCompute();
  4554. else
  4555. // The backedge is never taken.
  4556. return getConstant(CI->getType(), 0);
  4557. }
  4558. // If it's not an integer or pointer comparison then compute it the hard way.
  4559. return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
  4560. }
  4561. /// ComputeExitLimitFromICmp - Compute the number of times the
  4562. /// backedge of the specified loop will execute if its exit condition
  4563. /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
  4564. ScalarEvolution::ExitLimit
  4565. ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
  4566. ICmpInst *ExitCond,
  4567. BasicBlock *TBB,
  4568. BasicBlock *FBB,
  4569. bool ControlsExit) {
  4570. // If the condition was exit on true, convert the condition to exit on false
  4571. ICmpInst::Predicate Cond;
  4572. if (!L->contains(FBB))
  4573. Cond = ExitCond->getPredicate();
  4574. else
  4575. Cond = ExitCond->getInversePredicate();
  4576. // Handle common loops like: for (X = "string"; *X; ++X)
  4577. if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
  4578. if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
  4579. ExitLimit ItCnt =
  4580. ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
  4581. if (ItCnt.hasAnyInfo())
  4582. return ItCnt;
  4583. }
  4584. const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  4585. const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
  4586. // Try to evaluate any dependencies out of the loop.
  4587. LHS = getSCEVAtScope(LHS, L);
  4588. RHS = getSCEVAtScope(RHS, L);
  4589. // At this point, we would like to compute how many iterations of the
  4590. // loop the predicate will return true for these inputs.
  4591. if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
  4592. // If there is a loop-invariant, force it into the RHS.
  4593. std::swap(LHS, RHS);
  4594. Cond = ICmpInst::getSwappedPredicate(Cond);
  4595. }
  4596. // Simplify the operands before analyzing them.
  4597. (void)SimplifyICmpOperands(Cond, LHS, RHS);
  4598. // If we have a comparison of a chrec against a constant, try to use value
  4599. // ranges to answer this query.
  4600. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
  4601. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
  4602. if (AddRec->getLoop() == L) {
  4603. // Form the constant range.
  4604. ConstantRange CompRange(
  4605. ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
  4606. const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
  4607. if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
  4608. }
  4609. // HLSL Change - begin
4610. // Try to compute the value exhaustively *right now*, before trying the more
4611. // pessimistic partial evaluation.
  4612. const SCEV *AggresiveResult = ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
  4613. if (AggresiveResult != getCouldNotCompute())
  4614. return AggresiveResult;
  4615. // HLSL Change - end
  4616. switch (Cond) {
  4617. case ICmpInst::ICMP_NE: { // while (X != Y)
  4618. // Convert to: while (X-Y != 0)
  4619. ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  4620. if (EL.hasAnyInfo()) return EL;
  4621. break;
  4622. }
  4623. case ICmpInst::ICMP_EQ: { // while (X == Y)
  4624. // Convert to: while (X-Y == 0)
  4625. ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
  4626. if (EL.hasAnyInfo()) return EL;
  4627. break;
  4628. }
  4629. case ICmpInst::ICMP_SLT:
  4630. case ICmpInst::ICMP_ULT: { // while (X < Y)
  4631. bool IsSigned = Cond == ICmpInst::ICMP_SLT;
  4632. ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, ControlsExit);
  4633. if (EL.hasAnyInfo()) return EL;
  4634. break;
  4635. }
  4636. case ICmpInst::ICMP_SGT:
  4637. case ICmpInst::ICMP_UGT: { // while (X > Y)
  4638. bool IsSigned = Cond == ICmpInst::ICMP_SGT;
  4639. ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit);
  4640. if (EL.hasAnyInfo()) return EL;
  4641. break;
  4642. }
  4643. default:
  4644. #if 0
  4645. dbgs() << "ComputeBackedgeTakenCount ";
  4646. if (ExitCond->getOperand(0)->getType()->isUnsigned())
  4647. dbgs() << "[unsigned] ";
  4648. dbgs() << *LHS << " "
  4649. << Instruction::getOpcodeName(Instruction::ICmp)
  4650. << " " << *RHS << "\n";
  4651. #endif
  4652. break;
  4653. }
  4654. // return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); // HLSL Change
  4655. return getCouldNotCompute(); // HLSL Change - We already tried the exhaustive approach earlier, so don't try again and just give up.
  4656. }
  4657. ScalarEvolution::ExitLimit
  4658. ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L,
  4659. SwitchInst *Switch,
  4660. BasicBlock *ExitingBlock,
  4661. bool ControlsExit) {
  4662. assert(!L->contains(ExitingBlock) && "Not an exiting block!");
  4663. // Give up if the exit is the default dest of a switch.
  4664. if (Switch->getDefaultDest() == ExitingBlock)
  4665. return getCouldNotCompute();
  4666. assert(L->contains(Switch->getDefaultDest()) &&
  4667. "Default case must not exit the loop!");
  4668. const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  4669. const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
  4670. // while (X != Y) --> while (X-Y != 0)
  4671. ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  4672. if (EL.hasAnyInfo())
  4673. return EL;
  4674. return getCouldNotCompute();
  4675. }
  4676. static ConstantInt *
  4677. EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
  4678. ScalarEvolution &SE) {
  4679. const SCEV *InVal = SE.getConstant(C);
  4680. const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  4681. assert(isa<SCEVConstant>(Val) &&
  4682. "Evaluation of SCEV at constant didn't fold correctly?");
  4683. return cast<SCEVConstant>(Val)->getValue();
  4684. }
  4685. /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
  4686. /// 'icmp op load X, cst', try to see if we can compute the backedge
  4687. /// execution count.
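/// A typical case is a scan over a constant global table such as
///   for (i = 0; table[i] != 0; ++i) ...
/// where "table" is a constant global with a definitive initializer; each
/// candidate iteration's load is folded through the GEP indices until the
/// terminating comparison is satisfied.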
  4688. ScalarEvolution::ExitLimit
  4689. ScalarEvolution::ComputeLoadConstantCompareExitLimit(
  4690. LoadInst *LI,
  4691. Constant *RHS,
  4692. const Loop *L,
  4693. ICmpInst::Predicate predicate) {
  4694. if (LI->isVolatile()) return getCouldNotCompute();
  4695. // Check to see if the loaded pointer is a getelementptr of a global.
  4696. // TODO: Use SCEV instead of manually grubbing with GEPs.
  4697. GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  4698. if (!GEP) return getCouldNotCompute();
  4699. // Make sure that it is really a constant global we are gepping, with an
  4700. // initializer, and make sure the first IDX is really 0.
  4701. GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  4702. if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
  4703. GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
  4704. !cast<Constant>(GEP->getOperand(1))->isNullValue())
  4705. return getCouldNotCompute();
  4706. // Okay, we allow one non-constant index into the GEP instruction.
  4707. Value *VarIdx = nullptr;
  4708. std::vector<Constant*> Indexes;
  4709. unsigned VarIdxNum = 0;
  4710. for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
  4711. if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
  4712. Indexes.push_back(CI);
  4713. } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
  4714. if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
  4715. VarIdx = GEP->getOperand(i);
  4716. VarIdxNum = i-2;
  4717. Indexes.push_back(nullptr);
  4718. }
  4719. // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  4720. if (!VarIdx)
  4721. return getCouldNotCompute();
  4722. // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  4723. // Check to see if X is a loop variant variable value now.
  4724. const SCEV *Idx = getSCEV(VarIdx);
  4725. Idx = getSCEVAtScope(Idx, L);
  4726. // We can only recognize very limited forms of loop index expressions, in
  4727. // particular, only affine AddRec's like {C1,+,C2}.
  4728. const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  4729. if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
  4730. !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
  4731. !isa<SCEVConstant>(IdxExpr->getOperand(1)))
  4732. return getCouldNotCompute();
  4733. unsigned MaxSteps = MaxBruteForceIterations;
  4734. for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
  4735. ConstantInt *ItCst = ConstantInt::get(
  4736. cast<IntegerType>(IdxExpr->getType()), IterationNum);
  4737. ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
  4738. // Form the GEP offset.
  4739. Indexes[VarIdxNum] = Val;
  4740. Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
  4741. Indexes);
  4742. if (!Result) break; // Cannot compute!
  4743. // Evaluate the condition for this iteration.
  4744. Result = ConstantExpr::getICmp(predicate, Result, RHS);
  4745. if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
  4746. if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
  4747. #if 0
  4748. dbgs() << "\n***\n*** Computed loop count " << *ItCst
  4749. << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
  4750. << "***\n";
  4751. #endif
  4752. ++NumArrayLenItCounts;
  4753. return getConstant(ItCst); // Found terminating iteration!
  4754. }
  4755. }
  4756. return getCouldNotCompute();
  4757. }
  4758. /// CanConstantFold - Return true if we can constant fold an instruction of the
  4759. /// specified type, assuming that all operands were constants.
  4760. static bool CanConstantFold(const Instruction *I) {
  4761. if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
  4762. isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
  4763. isa<LoadInst>(I))
  4764. return true;
  4765. if (const CallInst *CI = dyn_cast<CallInst>(I))
  4766. if (const Function *F = CI->getCalledFunction())
  4767. return canConstantFoldCallTo(F);
  4768. return false;
  4769. }
  4770. /// Determine whether this instruction can constant evolve within this loop
  4771. /// assuming its operands can all constant evolve.
  4772. static bool canConstantEvolve(Instruction *I, const Loop *L) {
  4773. // An instruction outside of the loop can't be derived from a loop PHI.
  4774. if (!L->contains(I)) return false;
  4775. if (isa<PHINode>(I)) {
  4776. // We don't currently keep track of the control flow needed to evaluate
  4777. // PHIs, so we cannot handle PHIs inside of loops.
  4778. return L->getHeader() == I->getParent();
  4779. }
  4780. // If we won't be able to constant fold this expression even if the operands
  4781. // are constants, bail early.
  4782. return CanConstantFold(I);
  4783. }
  4784. /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
  4785. /// recursing through each instruction operand until reaching a loop header phi.
  4786. static PHINode *
  4787. getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
  4788. DxilValueCache *DVC, // HLSL Change
  4789. DenseMap<Instruction *, PHINode *> &PHIMap) {
  4790. // Otherwise, we can evaluate this instruction if all of its operands are
  4791. // constant or derived from a PHI node themselves.
  4792. PHINode *PHI = nullptr;
  4793. for (Instruction::op_iterator OpI = UseInst->op_begin(),
  4794. OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
  4795. if (isa<Constant>(*OpI)) continue;
  4796. // HLSL Change begin
  4797. if (DVC->GetConstValue(*OpI)) continue;
  4798. // HLSL Change end
  4799. Instruction *OpInst = dyn_cast<Instruction>(*OpI);
  4800. if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
  4801. PHINode *P = dyn_cast<PHINode>(OpInst);
  4802. if (!P)
  4803. // If this operand is already visited, reuse the prior result.
  4804. // We may have P != PHI if this is the deepest point at which the
  4805. // inconsistent paths meet.
  4806. P = PHIMap.lookup(OpInst);
  4807. if (!P) {
  4808. // Recurse and memoize the results, whether a phi is found or not.
  4809. // This recursive call invalidates pointers into PHIMap.
  4810. //P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap); // HLSL Change
  4811. P = getConstantEvolvingPHIOperands(OpInst, L, DVC, PHIMap); // HLSL Change - Pass DVC
  4812. PHIMap[OpInst] = P;
  4813. }
  4814. if (!P)
  4815. return nullptr; // Not evolving from PHI
  4816. if (PHI && PHI != P)
  4817. return nullptr; // Evolving from multiple different PHIs.
  4818. PHI = P;
  4819. }
4820. // This is an expression evolving from a constant PHI!
  4821. return PHI;
  4822. }
  4823. /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
  4824. /// in the loop that V is derived from. We allow arbitrary operations along the
  4825. /// way, but the operands of an operation must either be constants or a value
  4826. /// derived from a constant PHI. If this expression does not fit with these
  4827. /// constraints, return null.
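/// For example, an in-loop expression such as ((%phi * 3) + 1) == 42, where
/// %phi is a loop-header PHI and the other operands are constants, evolves
/// from the single PHI %phi, so %phi is returned here.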
  4828. // static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { // HLSL Change
  4829. static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L, DxilValueCache *DVC) { // HLSL Change
  4830. Instruction *I = dyn_cast<Instruction>(V);
  4831. if (!I || !canConstantEvolve(I, L)) return nullptr;
  4832. if (PHINode *PN = dyn_cast<PHINode>(I)) {
  4833. return PN;
  4834. }
  4835. // Record non-constant instructions contained by the loop.
  4836. DenseMap<Instruction *, PHINode *> PHIMap;
  4837. // return getConstantEvolvingPHIOperands(I, L, PHIMap); // HLSL Change
  4838. return getConstantEvolvingPHIOperands(I, L, DVC, PHIMap); // HLSL Change
  4839. }
  4840. /// EvaluateExpression - Given an expression that passes the
  4841. /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
  4842. /// in the loop has the value PHIVal. If we can't fold this expression for some
  4843. /// reason, return null.
  4844. static Constant *EvaluateExpression(Value *V, const Loop *L,
  4845. DenseMap<Instruction *, Constant *> &Vals,
  4846. const DataLayout &DL,
  4847. const TargetLibraryInfo *TLI) {
  4848. // Convenient constant check, but redundant for recursive calls.
  4849. if (Constant *C = dyn_cast<Constant>(V)) return C;
  4850. Instruction *I = dyn_cast<Instruction>(V);
  4851. if (!I) return nullptr;
  4852. if (Constant *C = Vals.lookup(I)) return C;
  4853. // An instruction inside the loop depends on a value outside the loop that we
  4854. // weren't given a mapping for, or a value such as a call inside the loop.
  4855. if (!canConstantEvolve(I, L)) return nullptr;
  4856. // An unmapped PHI can be due to a branch or another loop inside this loop,
  4857. // or due to this not being the initial iteration through a loop where we
  4858. // couldn't compute the evolution of this particular PHI last time.
  4859. if (isa<PHINode>(I)) return nullptr;
  4860. std::vector<Constant*> Operands(I->getNumOperands());
  4861. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
  4862. Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
  4863. if (!Operand) {
  4864. Operands[i] = dyn_cast<Constant>(I->getOperand(i));
  4865. if (!Operands[i]) return nullptr;
  4866. continue;
  4867. }
  4868. Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
  4869. Vals[Operand] = C;
  4870. if (!C) return nullptr;
  4871. Operands[i] = C;
  4872. }
  4873. if (CmpInst *CI = dyn_cast<CmpInst>(I))
  4874. return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
  4875. Operands[1], DL, TLI);
  4876. if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
  4877. if (!LI->isVolatile())
  4878. return ConstantFoldLoadFromConstPtr(Operands[0], DL);
  4879. }
  4880. return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
  4881. TLI);
  4882. }
  4883. /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
  4884. /// in the header of its containing loop, we know the loop executes a
  4885. /// constant number of times, and the PHI node is just a recurrence
  4886. /// involving constants, fold it.
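/// For example, a header PHI that starts at the constant 0 and adds 3 on every
/// backedge has the constant value 3 * BEs on the iteration in which the loop
/// exits, and that constant is what this routine returns.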
  4887. Constant *
  4888. ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
  4889. const APInt &BEs,
  4890. const Loop *L) {
  4891. DenseMap<PHINode*, Constant*>::const_iterator I =
  4892. ConstantEvolutionLoopExitValue.find(PN);
  4893. if (I != ConstantEvolutionLoopExitValue.end())
  4894. return I->second;
  4895. if (BEs.ugt(MaxBruteForceIterations))
  4896. return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
  4897. Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
  4898. DenseMap<Instruction *, Constant *> CurrentIterVals;
  4899. BasicBlock *Header = L->getHeader();
  4900. assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
  4901. // Since the loop is canonicalized, the PHI node must have two entries. One
  4902. // entry must be a constant (coming in from outside of the loop), and the
  4903. // second must be derived from the same PHI.
  4904. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  4905. PHINode *PHI = nullptr;
  4906. for (BasicBlock::iterator I = Header->begin();
  4907. (PHI = dyn_cast<PHINode>(I)); ++I) {
  4908. Constant *StartCST =
  4909. dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
  4910. if (!StartCST) continue;
  4911. CurrentIterVals[PHI] = StartCST;
  4912. }
  4913. if (!CurrentIterVals.count(PN))
  4914. return RetVal = nullptr;
  4915. Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  4916. // Execute the loop symbolically to determine the exit value.
  4917. if (BEs.getActiveBits() >= 32)
  4918. return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!
  4919. unsigned NumIterations = BEs.getZExtValue(); // must be in range
  4920. unsigned IterationNum = 0;
  4921. const DataLayout &DL = F->getParent()->getDataLayout();
  4922. for (; ; ++IterationNum) {
  4923. if (IterationNum == NumIterations)
  4924. return RetVal = CurrentIterVals[PN]; // Got exit value!
  4925. // Compute the value of the PHIs for the next iteration.
  4926. // EvaluateExpression adds non-phi values to the CurrentIterVals map.
  4927. DenseMap<Instruction *, Constant *> NextIterVals;
  4928. Constant *NextPHI =
  4929. EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
  4930. if (!NextPHI)
  4931. return nullptr; // Couldn't evaluate!
  4932. NextIterVals[PN] = NextPHI;
  4933. bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
  4934. // Also evaluate the other PHI nodes. However, we don't get to stop if we
  4935. // cease to be able to evaluate one of them or if they stop evolving,
  4936. // because that doesn't necessarily prevent us from computing PN.
  4937. SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
  4938. for (DenseMap<Instruction *, Constant *>::const_iterator
  4939. I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
  4940. PHINode *PHI = dyn_cast<PHINode>(I->first);
  4941. if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
  4942. PHIsToCompute.push_back(std::make_pair(PHI, I->second));
  4943. }
  4944. // We use two distinct loops because EvaluateExpression may invalidate any
  4945. // iterators into CurrentIterVals.
  4946. for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
  4947. I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
  4948. PHINode *PHI = I->first;
  4949. Constant *&NextPHI = NextIterVals[PHI];
  4950. if (!NextPHI) { // Not already computed.
  4951. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
  4952. NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
  4953. }
  4954. if (NextPHI != I->second)
  4955. StoppedEvolving = false;
  4956. }
  4957. // If all entries in CurrentIterVals == NextIterVals then we can stop
  4958. // iterating, the loop can't continue to change.
  4959. if (StoppedEvolving)
  4960. return RetVal = CurrentIterVals[PN];
  4961. CurrentIterVals.swap(NextIterVals);
  4962. }
  4963. }
  4964. /// ComputeExitCountExhaustively - If the loop is known to execute a
  4965. /// constant number of times (the condition evolves only from constants),
4966. /// try to evaluate a few iterations of the loop until the exit
4967. /// condition gets a value of ExitWhen (true or false). If we cannot
  4968. /// evaluate the trip count of the loop, return getCouldNotCompute().
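/// For example, if the controlling PHI starts at the constant 1 and is doubled
/// each iteration, an exit condition such as "phi > 64" can be decided by
/// symbolically evaluating only a handful of iterations.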
  4969. const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
  4970. Value *Cond,
  4971. bool ExitWhen) {
  4972. // PHINode *PN = getConstantEvolvingPHI(Cond, L); // HLSL Change
  4973. PHINode *PN = getConstantEvolvingPHI(Cond, L, &getAnalysis<DxilValueCache>()); // HLSL Change
  4974. if (!PN) return getCouldNotCompute();
  4975. // If the loop is canonicalized, the PHI will have exactly two entries.
  4976. // That's the only form we support here.
  4977. if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
  4978. DenseMap<Instruction *, Constant *> CurrentIterVals;
  4979. BasicBlock *Header = L->getHeader();
  4980. assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
  4981. // One entry must be a constant (coming in from outside of the loop), and the
  4982. // second must be derived from the same PHI.
  4983. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  4984. PHINode *PHI = nullptr;
  4985. for (BasicBlock::iterator I = Header->begin();
  4986. (PHI = dyn_cast<PHINode>(I)); ++I) {
  4987. Constant *StartCST =
  4988. dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
  4989. // HLSL Change begin
  4990. // If we don't have a constant, try getting a constant from the value cache.
  4991. if (!StartCST)
  4992. if (Constant *C = getAnalysis<DxilValueCache>().GetConstValue(PHI->getIncomingValue(!SecondIsBackedge)))
  4993. StartCST = C;
  4994. // HLSL Change end
  4995. if (!StartCST) continue;
  4996. CurrentIterVals[PHI] = StartCST;
  4997. }
  4998. if (!CurrentIterVals.count(PN))
  4999. return getCouldNotCompute();
  5000. // HLSL Change begin
  5001. SmallVector<std::pair<Instruction *, Constant *>, 4> KnownInvariantOps;
  5002. if (Instruction *CondI = dyn_cast<Instruction>(Cond)) {
  5003. SmallVector<Instruction *, 4> Worklist;
  5004. DxilValueCache *DVC = &getAnalysis<DxilValueCache>();
  5005. Worklist.push_back(CondI);
  5006. while (Worklist.size()) {
  5007. Instruction *I = Worklist.pop_back_val();
  5008. if (Constant *C = DVC->GetConstValue(I)) {
  5009. KnownInvariantOps.push_back({ I, C });
  5010. }
  5011. else if (CurrentIterVals.count(I)) {
  5012. continue;
  5013. }
  5014. else if (L->contains(I)) {
  5015. for (Use &U : I->operands()) {
  5016. if (Instruction *OpI = dyn_cast<Instruction>(U.get())) {
  5017. Worklist.push_back(OpI);
  5018. }
  5019. }
  5020. }
  5021. }
  5022. }
  5023. // HLSL Change end
5024. // Okay, we found a PHI node that defines the trip count of this loop. Execute
  5025. // the loop symbolically to determine when the condition gets a value of
  5026. // "ExitWhen".
  5027. unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  5028. const DataLayout &DL = F->getParent()->getDataLayout();
  5029. for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
  5030. // HLSL Change begin
  5031. for (std::pair<Instruction *, Constant *> &Pair : KnownInvariantOps)
  5032. CurrentIterVals[Pair.first] = Pair.second;
  5033. // HLSL Change end
  5034. ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>(
  5035. EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI));
  5036. // Couldn't symbolically evaluate.
  5037. if (!CondVal) return getCouldNotCompute();
  5038. if (CondVal->getValue() == uint64_t(ExitWhen)) {
  5039. ++NumBruteForceTripCountsComputed;
  5040. return getConstant(Type::getInt32Ty(getContext()), IterationNum);
  5041. }
  5042. // Update all the PHI nodes for the next iteration.
  5043. DenseMap<Instruction *, Constant *> NextIterVals;
  5044. // Create a list of which PHIs we need to compute. We want to do this before
  5045. // calling EvaluateExpression on them because that may invalidate iterators
  5046. // into CurrentIterVals.
  5047. SmallVector<PHINode *, 8> PHIsToCompute;
  5048. for (DenseMap<Instruction *, Constant *>::const_iterator
  5049. I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
  5050. PHINode *PHI = dyn_cast<PHINode>(I->first);
  5051. if (!PHI || PHI->getParent() != Header) continue;
  5052. PHIsToCompute.push_back(PHI);
  5053. }
  5054. for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
  5055. E = PHIsToCompute.end(); I != E; ++I) {
  5056. PHINode *PHI = *I;
  5057. Constant *&NextPHI = NextIterVals[PHI];
  5058. if (NextPHI) continue; // Already computed!
  5059. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
  5060. NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
  5061. }
  5062. CurrentIterVals.swap(NextIterVals);
  5063. }
  5064. // Too many iterations were needed to evaluate.
  5065. return getCouldNotCompute();
  5066. }
  5067. /// getSCEVAtScope - Return a SCEV expression for the specified value
  5068. /// at the specified scope in the program. The L value specifies a loop
5069. /// nest to evaluate the expression at, where null is the top-level scope and a
5070. /// specified loop is the scope immediately inside of that loop.
  5071. ///
  5072. /// This method can be used to compute the exit value for a variable defined
  5073. /// in a loop by querying what the value will hold in the parent loop.
  5074. ///
  5075. /// In the case that a relevant loop exit value cannot be computed, the
  5076. /// original value V is returned.
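/// For example, if %i is the induction variable of an inner loop whose
/// backedge-taken count is known, asking for %i at the scope of the parent
/// loop yields the value %i holds on the inner loop's final iteration rather
/// than the recurrence itself.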
  5077. const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  5078. // Check to see if we've folded this expression at this loop before.
  5079. SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V];
  5080. for (unsigned u = 0; u < Values.size(); u++) {
  5081. if (Values[u].first == L)
  5082. return Values[u].second ? Values[u].second : V;
  5083. }
  5084. Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
  5085. // Otherwise compute it.
  5086. const SCEV *C = computeSCEVAtScope(V, L);
  5087. SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
  5088. for (unsigned u = Values2.size(); u > 0; u--) {
  5089. if (Values2[u - 1].first == L) {
  5090. Values2[u - 1].second = C;
  5091. break;
  5092. }
  5093. }
  5094. return C;
  5095. }
  5096. /// This builds up a Constant using the ConstantExpr interface. That way, we
  5097. /// will return Constants for objects which aren't represented by a
  5098. /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
  5099. /// Returns NULL if the SCEV isn't representable as a Constant.
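/// For example, a SCEVConstant, or a sign-extension of a SCEVUnknown whose
/// value is itself a Constant, can be rebuilt as a ConstantExpr, while any
/// SCEVAddRecExpr cannot.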
  5100. static Constant *BuildConstantFromSCEV(const SCEV *V) {
  5101. switch (static_cast<SCEVTypes>(V->getSCEVType())) {
  5102. case scCouldNotCompute:
  5103. case scAddRecExpr:
  5104. break;
  5105. case scConstant:
  5106. return cast<SCEVConstant>(V)->getValue();
  5107. case scUnknown:
  5108. return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  5109. case scSignExtend: {
  5110. const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
  5111. if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
  5112. return ConstantExpr::getSExt(CastOp, SS->getType());
  5113. break;
  5114. }
  5115. case scZeroExtend: {
  5116. const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
  5117. if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
  5118. return ConstantExpr::getZExt(CastOp, SZ->getType());
  5119. break;
  5120. }
  5121. case scTruncate: {
  5122. const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
  5123. if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
  5124. return ConstantExpr::getTrunc(CastOp, ST->getType());
  5125. break;
  5126. }
  5127. case scAddExpr: {
  5128. const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
  5129. if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
  5130. if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
  5131. unsigned AS = PTy->getAddressSpace();
  5132. Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
  5133. C = ConstantExpr::getBitCast(C, DestPtrTy);
  5134. }
  5135. for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
  5136. Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
  5137. if (!C2) return nullptr;
  5138. // First pointer!
  5139. if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
  5140. unsigned AS = C2->getType()->getPointerAddressSpace();
  5141. std::swap(C, C2);
  5142. Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
  5143. // The offsets have been converted to bytes. We can add bytes to an
  5144. // i8* by GEP with the byte count in the first index.
  5145. C = ConstantExpr::getBitCast(C, DestPtrTy);
  5146. }
  5147. // Don't bother trying to sum two pointers. We probably can't
  5148. // statically compute a load that results from it anyway.
  5149. if (C2->getType()->isPointerTy())
  5150. return nullptr;
  5151. if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
  5152. if (PTy->getElementType()->isStructTy())
  5153. C2 = ConstantExpr::getIntegerCast(
  5154. C2, Type::getInt32Ty(C->getContext()), true);
  5155. C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
  5156. } else
  5157. C = ConstantExpr::getAdd(C, C2);
  5158. }
  5159. return C;
  5160. }
  5161. break;
  5162. }
  5163. case scMulExpr: {
  5164. const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
  5165. if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
  5166. // Don't bother with pointers at all.
  5167. if (C->getType()->isPointerTy()) return nullptr;
  5168. for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
  5169. Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
  5170. if (!C2 || C2->getType()->isPointerTy()) return nullptr;
  5171. C = ConstantExpr::getMul(C, C2);
  5172. }
  5173. return C;
  5174. }
  5175. break;
  5176. }
  5177. case scUDivExpr: {
  5178. const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
  5179. if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
  5180. if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
  5181. if (LHS->getType() == RHS->getType())
  5182. return ConstantExpr::getUDiv(LHS, RHS);
  5183. break;
  5184. }
  5185. case scSMaxExpr:
  5186. case scUMaxExpr:
  5187. break; // TODO: smax, umax.
  5188. }
  5189. return nullptr;
  5190. }
  5191. const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  5192. if (isa<SCEVConstant>(V)) return V;
  5193. // If this instruction is evolved from a constant-evolving PHI, compute the
  5194. // exit value from the loop without using SCEVs.
  5195. if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
  5196. if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
  5197. const Loop *LI = (*this->LI)[I->getParent()];
  5198. if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
  5199. if (PHINode *PN = dyn_cast<PHINode>(I))
  5200. if (PN->getParent() == LI->getHeader()) {
  5201. // Okay, there is no closed form solution for the PHI node. Check
  5202. // to see if the loop that contains it has a known backedge-taken
  5203. // count. If so, we may be able to force computation of the exit
  5204. // value.
  5205. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
  5206. if (const SCEVConstant *BTCC =
  5207. dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
  5208. // Okay, we know how many times the containing loop executes. If
  5209. // this is a constant evolving PHI node, get the final value at
  5210. // the specified iteration number.
  5211. Constant *RV = getConstantEvolutionLoopExitValue(PN,
  5212. BTCC->getValue()->getValue(),
  5213. LI);
  5214. if (RV) return getSCEV(RV);
  5215. }
  5216. }
  5217. // Okay, this is an expression that we cannot symbolically evaluate
  5218. // into a SCEV. Check to see if it's possible to symbolically evaluate
  5219. // the arguments into constants, and if so, try to constant propagate the
  5220. // result. This is particularly useful for computing loop exit values.
  5221. if (CanConstantFold(I)) {
  5222. SmallVector<Constant *, 4> Operands;
  5223. bool MadeImprovement = false;
  5224. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
  5225. Value *Op = I->getOperand(i);
  5226. if (Constant *C = dyn_cast<Constant>(Op)) {
  5227. Operands.push_back(C);
  5228. continue;
  5229. }
  5230. // If any of the operands is non-constant and if they are
  5231. // non-integer and non-pointer, don't even try to analyze them
  5232. // with scev techniques.
  5233. if (!isSCEVable(Op->getType()))
  5234. return V;
  5235. const SCEV *OrigV = getSCEV(Op);
  5236. const SCEV *OpV = getSCEVAtScope(OrigV, L);
  5237. MadeImprovement |= OrigV != OpV;
  5238. Constant *C = BuildConstantFromSCEV(OpV);
  5239. if (!C) return V;
  5240. if (C->getType() != Op->getType())
  5241. C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
  5242. Op->getType(),
  5243. false),
  5244. C, Op->getType());
  5245. Operands.push_back(C);
  5246. }
  5247. // Check to see if getSCEVAtScope actually made an improvement.
  5248. if (MadeImprovement) {
  5249. Constant *C = nullptr;
  5250. const DataLayout &DL = F->getParent()->getDataLayout();
  5251. if (const CmpInst *CI = dyn_cast<CmpInst>(I))
  5252. C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
  5253. Operands[1], DL, TLI);
  5254. else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
  5255. if (!LI->isVolatile())
  5256. C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
  5257. } else
  5258. C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands,
  5259. DL, TLI);
  5260. if (!C) return V;
  5261. return getSCEV(C);
  5262. }
  5263. }
  5264. }
  5265. // This is some other type of SCEVUnknown, just return it.
  5266. return V;
  5267. }
  5268. if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
  5269. // Avoid performing the look-up in the common case where the specified
  5270. // expression has no loop-variant portions.
  5271. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
  5272. const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
  5273. if (OpAtScope != Comm->getOperand(i)) {
  5274. // Okay, at least one of these operands is loop variant but might be
  5275. // foldable. Build a new instance of the folded commutative expression.
  5276. SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
  5277. Comm->op_begin()+i);
  5278. NewOps.push_back(OpAtScope);
  5279. for (++i; i != e; ++i) {
  5280. OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
  5281. NewOps.push_back(OpAtScope);
  5282. }
  5283. if (isa<SCEVAddExpr>(Comm))
  5284. return getAddExpr(NewOps);
  5285. if (isa<SCEVMulExpr>(Comm))
  5286. return getMulExpr(NewOps);
  5287. if (isa<SCEVSMaxExpr>(Comm))
  5288. return getSMaxExpr(NewOps);
  5289. if (isa<SCEVUMaxExpr>(Comm))
  5290. return getUMaxExpr(NewOps);
  5291. llvm_unreachable("Unknown commutative SCEV type!");
  5292. }
  5293. }
  5294. // If we got here, all operands are loop invariant.
  5295. return Comm;
  5296. }
  5297. if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
  5298. const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
  5299. const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
  5300. if (LHS == Div->getLHS() && RHS == Div->getRHS())
  5301. return Div; // must be loop invariant
  5302. return getUDivExpr(LHS, RHS);
  5303. }
  5304. // If this is a loop recurrence for a loop that does not contain L, then we
  5305. // are dealing with the final value computed by the loop.
  5306. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
  5307. // First, attempt to evaluate each operand.
  5308. // Avoid performing the look-up in the common case where the specified
  5309. // expression has no loop-variant portions.
  5310. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
  5311. const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
  5312. if (OpAtScope == AddRec->getOperand(i))
  5313. continue;
  5314. // Okay, at least one of these operands is loop variant but might be
  5315. // foldable. Build a new instance of the folded commutative expression.
  5316. SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
  5317. AddRec->op_begin()+i);
  5318. NewOps.push_back(OpAtScope);
  5319. for (++i; i != e; ++i)
  5320. NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
  5321. const SCEV *FoldedRec =
  5322. getAddRecExpr(NewOps, AddRec->getLoop(),
  5323. AddRec->getNoWrapFlags(SCEV::FlagNW));
  5324. AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
  5325. // The addrec may be folded to a nonrecurrence, for example, if the
  5326. // induction variable is multiplied by zero after constant folding. Go
  5327. // ahead and return the folded value.
  5328. if (!AddRec)
  5329. return FoldedRec;
  5330. break;
  5331. }
  5332. // If the scope is outside the addrec's loop, evaluate it by using the
  5333. // loop exit value of the addrec.
  5334. if (!AddRec->getLoop()->contains(L)) {
  5335. // To evaluate this recurrence, we need to know how many times the AddRec
  5336. // loop iterates. Compute this now.
  5337. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
  5338. if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
  5339. // Then, evaluate the AddRec.
  5340. return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
  5341. }
  5342. return AddRec;
  5343. }
  5344. if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
  5345. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  5346. if (Op == Cast->getOperand())
  5347. return Cast; // must be loop invariant
  5348. return getZeroExtendExpr(Op, Cast->getType());
  5349. }
  5350. if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
  5351. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  5352. if (Op == Cast->getOperand())
  5353. return Cast; // must be loop invariant
  5354. return getSignExtendExpr(Op, Cast->getType());
  5355. }
  5356. if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
  5357. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  5358. if (Op == Cast->getOperand())
  5359. return Cast; // must be loop invariant
  5360. return getTruncateExpr(Op, Cast->getType());
  5361. }
  5362. llvm_unreachable("Unknown SCEV type!");
  5363. }
  5364. /// getSCEVAtScope - This is a convenience function which does
  5365. /// getSCEVAtScope(getSCEV(V), L).
  5366. const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  5367. return getSCEVAtScope(getSCEV(V), L);
  5368. }
  5369. /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
  5370. /// following equation:
  5371. ///
  5372. /// A * X = B (mod N)
  5373. ///
  5374. /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
  5375. /// A and B isn't important.
  5376. ///
  5377. /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
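///
/// Illustrative worked example (added for exposition, not from the original
/// comment): with BW = 8, solve 6*X = 4 (mod 256). D = gcd(6, 256) = 2 and
/// B = 4 is divisible by D, so a solution exists; A/D = 3, N/D = 128, and the
/// multiplicative inverse of 3 modulo 128 is 43, so the minimum root is
/// X = (43 * (4/2)) mod 128 = 86 (indeed 6*86 = 516 = 4 (mod 256)).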
  5378. static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
  5379. ScalarEvolution &SE) {
  5380. uint32_t BW = A.getBitWidth();
  5381. assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  5382. assert(A != 0 && "A must be non-zero.");
  5383. // 1. D = gcd(A, N)
  5384. //
  5385. // The gcd of A and N may have only one prime factor: 2. The number of
5386. // trailing zeros in A is its multiplicity.
  5387. uint32_t Mult2 = A.countTrailingZeros();
  5388. // D = 2^Mult2
  5389. // 2. Check if B is divisible by D.
  5390. //
  5391. // B is divisible by D if and only if the multiplicity of prime factor 2 for B
5392. // is not less than the multiplicity of this prime factor for D.
  5393. if (B.countTrailingZeros() < Mult2)
  5394. return SE.getCouldNotCompute();
  5395. // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  5396. // modulo (N / D).
  5397. //
  5398. // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
  5399. // bit width during computations.
  5400. APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  5401. APInt Mod(BW + 1, 0);
  5402. Mod.setBit(BW - Mult2); // Mod = N / D
  5403. APInt I = AD.multiplicativeInverse(Mod);
  5404. // 4. Compute the minimum unsigned root of the equation:
  5405. // I * (B / D) mod (N / D)
  5406. APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
  5407. // The result is guaranteed to be less than 2^BW so we may truncate it to BW
  5408. // bits.
  5409. return SE.getConstant(Result.trunc(BW));
  5410. }
  5411. /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
  5412. /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
  5413. /// might be the same) or two SCEVCouldNotCompute objects.
  5414. ///
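/// Background note (added for exposition): the chrec {L,+,M,+,N} evaluated at
/// iteration i is L + M*i + N*i*(i-1)/2, i.e. the polynomial
/// (N/2)*i^2 + (M - N/2)*i + L. That is why the code below takes A = N/2,
/// B = M - N/2, and C = L before applying the quadratic formula.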
  5415. static std::pair<const SCEV *,const SCEV *>
  5416. SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  5417. assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  5418. const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  5419. const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  5420. const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  5421. // We currently can only solve this if the coefficients are constants.
  5422. if (!LC || !MC || !NC) {
  5423. const SCEV *CNC = SE.getCouldNotCompute();
  5424. return std::make_pair(CNC, CNC);
  5425. }
  5426. uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  5427. const APInt &L = LC->getValue()->getValue();
  5428. const APInt &M = MC->getValue()->getValue();
  5429. const APInt &N = NC->getValue()->getValue();
  5430. APInt Two(BitWidth, 2);
  5431. APInt Four(BitWidth, 4);
  5432. {
  5433. using namespace APIntOps;
  5434. const APInt& C = L;
  5435. // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
  5436. // The B coefficient is M-N/2
  5437. APInt B(M);
  5438. B -= sdiv(N,Two);
  5439. // The A coefficient is N/2
  5440. APInt A(N.sdiv(Two));
  5441. // Compute the B^2-4ac term.
  5442. APInt SqrtTerm(B);
  5443. SqrtTerm *= B;
  5444. SqrtTerm -= Four * (A * C);
  5445. if (SqrtTerm.isNegative()) {
  5446. // The loop is provably infinite.
  5447. const SCEV *CNC = SE.getCouldNotCompute();
  5448. return std::make_pair(CNC, CNC);
  5449. }
  5450. // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
  5451. // integer value or else APInt::sqrt() will assert.
  5452. APInt SqrtVal(SqrtTerm.sqrt());
  5453. // Compute the two solutions for the quadratic formula.
  5454. // The divisions must be performed as signed divisions.
  5455. APInt NegB(-B);
  5456. APInt TwoA(A << 1);
  5457. if (TwoA.isMinValue()) {
  5458. const SCEV *CNC = SE.getCouldNotCompute();
  5459. return std::make_pair(CNC, CNC);
  5460. }
  5461. LLVMContext &Context = SE.getContext();
  5462. ConstantInt *Solution1 =
  5463. ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  5464. ConstantInt *Solution2 =
  5465. ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
  5466. return std::make_pair(SE.getConstant(Solution1),
  5467. SE.getConstant(Solution2));
  5468. } // end APIntOps namespace
  5469. }
  5470. /// HowFarToZero - Return the number of times a backedge comparing the specified
  5471. /// value to zero will execute. If not computable, return CouldNotCompute.
  5472. ///
5473. /// This is only used for loops with an "x != y" exit test. The exit condition is
5474. /// now expressed as a single expression, V = x-y. So the exit test is
5475. /// effectively V != 0. We know, and take advantage of the fact, that this
5476. /// expression is only ever used in a comparison-against-zero context.
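///
/// Illustrative example (added for exposition): for the affine addrec
/// {10,+,-2} over i8, the step is negative, so Distance = Start = 10 and the
/// computed backedge-taken count works out to 10 /u 2 = 5 -- the value
/// reaches zero on iteration 5.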
  5477. ScalarEvolution::ExitLimit
  5478. ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool ControlsExit) {
  5479. // If the value is a constant
  5480. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
  5481. // If the value is already zero, the branch will execute zero times.
  5482. if (C->getValue()->isZero()) return C;
  5483. return getCouldNotCompute(); // Otherwise it will loop infinitely.
  5484. }
  5485. const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  5486. if (!AddRec || AddRec->getLoop() != L)
  5487. return getCouldNotCompute();
  5488. // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  5489. // the quadratic equation to solve it.
  5490. if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
  5491. std::pair<const SCEV *,const SCEV *> Roots =
  5492. SolveQuadraticEquation(AddRec, *this);
  5493. const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
  5494. const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
  5495. if (R1 && R2) {
  5496. #if 0
  5497. dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
  5498. << " sol#2: " << *R2 << "\n";
  5499. #endif
  5500. // Pick the smallest positive root value.
  5501. if (ConstantInt *CB =
  5502. dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
  5503. R1->getValue(),
  5504. R2->getValue()))) {
  5505. if (!CB->getZExtValue())
  5506. std::swap(R1, R2); // R1 is the minimum root now.
  5507. // We can only use this value if the chrec ends up with an exact zero
  5508. // value at this index. When solving for "X*X != 5", for example, we
  5509. // should not accept a root of 2.
  5510. const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
  5511. if (Val->isZero())
  5512. return R1; // We found a quadratic root!
  5513. }
  5514. }
  5515. return getCouldNotCompute();
  5516. }
  5517. // Otherwise we can only handle this if it is affine.
  5518. if (!AddRec->isAffine())
  5519. return getCouldNotCompute();
  5520. // If this is an affine expression, the execution count of this branch is
  5521. // the minimum unsigned root of the following equation:
  5522. //
  5523. // Start + Step*N = 0 (mod 2^BW)
  5524. //
  5525. // equivalent to:
  5526. //
  5527. // Step*N = -Start (mod 2^BW)
  5528. //
  5529. // where BW is the common bit width of Start and Step.
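// For example (added for exposition), with Start = 6 and Step = 4 over i8 the
// equation is 4*N = -6 = 250 (mod 256); 250 is not divisible by
// gcd(4, 256) = 4, so SolveLinEquationWithOverflow below has no root to
// return for this case.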
  5530. // Get the initial value for the loop.
  5531. const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  5532. const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
  5533. // For now we handle only constant steps.
  5534. //
  5535. // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  5536. // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
5537. // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step.
  5538. // We have not yet seen any such cases.
  5539. const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  5540. if (!StepC || StepC->getValue()->equalsInt(0))
  5541. return getCouldNotCompute();
  5542. // For positive steps (counting up until unsigned overflow):
  5543. // N = -Start/Step (as unsigned)
  5544. // For negative steps (counting down to zero):
  5545. // N = Start/-Step
  5546. // First compute the unsigned distance from zero in the direction of Step.
  5547. bool CountDown = StepC->getValue()->getValue().isNegative();
  5548. const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
  5549. // Handle unitary steps, which cannot wraparound.
  5550. // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  5551. // N = Distance (as unsigned)
  5552. if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
  5553. ConstantRange CR = getUnsignedRange(Start);
  5554. const SCEV *MaxBECount;
  5555. if (!CountDown && CR.getUnsignedMin().isMinValue())
  5556. // When counting up, the worst starting value is 1, not 0.
  5557. MaxBECount = CR.getUnsignedMax().isMinValue()
  5558. ? getConstant(APInt::getMinValue(CR.getBitWidth()))
  5559. : getConstant(APInt::getMaxValue(CR.getBitWidth()));
  5560. else
  5561. MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
  5562. : -CR.getUnsignedMin());
  5563. return ExitLimit(Distance, MaxBECount);
  5564. }
  5565. // As a special case, handle the instance where Step is a positive power of
  5566. // two. In this case, determining whether Step divides Distance evenly can be
  5567. // done by counting and comparing the number of trailing zeros of Step and
  5568. // Distance.
  5569. if (!CountDown) {
  5570. const APInt &StepV = StepC->getValue()->getValue();
5571. // StepV.isPowerOf2() returns true if StepV is a positive power of two. It
5572. // also returns true if StepV is maximally negative (e.g., INT_MIN), but that
5573. // case is not handled as this code is guarded by !CountDown.
  5574. if (StepV.isPowerOf2() &&
  5575. GetMinTrailingZeros(Distance) >= StepV.countTrailingZeros())
  5576. return getUDivExactExpr(Distance, Step);
  5577. }
  5578. // If the condition controls loop exit (the loop exits only if the expression
  5579. // is true) and the addition is no-wrap we can use unsigned divide to
  5580. // compute the backedge count. In this case, the step may not divide the
  5581. // distance, but we don't care because if the condition is "missed" the loop
  5582. // will have undefined behavior due to wrapping.
  5583. if (ControlsExit && AddRec->getNoWrapFlags(SCEV::FlagNW)) {
  5584. const SCEV *Exact =
  5585. getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
  5586. return ExitLimit(Exact, Exact);
  5587. }
  5588. // Then, try to solve the above equation provided that Start is constant.
  5589. if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
  5590. return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
  5591. -StartC->getValue()->getValue(),
  5592. *this);
  5593. return getCouldNotCompute();
  5594. }
  5595. /// HowFarToNonZero - Return the number of times a backedge checking the
  5596. /// specified value for nonzero will execute. If not computable, return
  5597. /// CouldNotCompute
  5598. ScalarEvolution::ExitLimit
  5599. ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
  5600. // Loops that look like: while (X == 0) are very strange indeed. We don't
  5601. // handle them yet except for the trivial case. This could be expanded in the
  5602. // future as needed.
  5603. // If the value is a constant, check to see if it is known to be non-zero
  5604. // already. If so, the backedge will execute zero times.
  5605. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
  5606. if (!C->getValue()->isNullValue())
  5607. return getConstant(C->getType(), 0);
  5608. return getCouldNotCompute(); // Otherwise it will loop infinitely.
  5609. }
  5610. // We could implement others, but I really doubt anyone writes loops like
  5611. // this, and if they did, they would already be constant folded.
  5612. return getCouldNotCompute();
  5613. }
  5614. /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
  5615. /// (which may not be an immediate predecessor) which has exactly one
  5616. /// successor from which BB is reachable, or null if no such block is
  5617. /// found.
  5618. ///
  5619. std::pair<BasicBlock *, BasicBlock *>
  5620. ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  5621. // If the block has a unique predecessor, then there is no path from the
  5622. // predecessor to the block that does not go through the direct edge
  5623. // from the predecessor to the block.
  5624. if (BasicBlock *Pred = BB->getSinglePredecessor())
  5625. return std::make_pair(Pred, BB);
  5626. // A loop's header is defined to be a block that dominates the loop.
  5627. // If the header has a unique predecessor outside the loop, it must be
  5628. // a block that has exactly one successor that can reach the loop.
  5629. if (Loop *L = LI->getLoopFor(BB))
  5630. return std::make_pair(L->getLoopPredecessor(), L->getHeader());
  5631. return std::pair<BasicBlock *, BasicBlock *>();
  5632. }
  5633. /// HasSameValue - SCEV structural equivalence is usually sufficient for
5634. /// testing whether two expressions are equal; however, for the purposes of
  5635. /// looking for a condition guarding a loop, it can be useful to be a little
  5636. /// more general, since a front-end may have replicated the controlling
  5637. /// expression.
  5638. ///
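/// For instance (illustrative): if a front-end emitted the same
/// "add i32 %a, %b" computation twice as two distinct instructions, their
/// SCEVUnknowns compare equal here even though they are different SCEV
/// objects, because the instructions are identical and do not read memory.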
  5639. static bool HasSameValue(const SCEV *A, const SCEV *B) {
  5640. // Quick check to see if they are the same SCEV.
  5641. if (A == B) return true;
  5642. // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  5643. // two different instructions with the same value. Check for this case.
  5644. if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
  5645. if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
  5646. if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
  5647. if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
  5648. if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
  5649. return true;
  5650. // Otherwise assume they may have a different value.
  5651. return false;
  5652. }
  5653. /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
  5654. /// predicate Pred. Return true iff any changes were made.
  5655. ///
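/// For example (illustrative): "5 s<= X" is first swapped to "X s>= 5" to put
/// the constant on the right, and a *-or-equal comparison such as "X u<= 7"
/// is canonicalized to "X u< 8" (in a sufficiently wide type).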
  5656. bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
  5657. const SCEV *&LHS, const SCEV *&RHS,
  5658. unsigned Depth) {
  5659. bool Changed = false;
  5660. // If we hit the max recursion limit bail out.
  5661. if (Depth >= 3)
  5662. return false;
  5663. // Canonicalize a constant to the right side.
  5664. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
  5665. // Check for both operands constant.
  5666. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
  5667. if (ConstantExpr::getICmp(Pred,
  5668. LHSC->getValue(),
  5669. RHSC->getValue())->isNullValue())
  5670. goto trivially_false;
  5671. else
  5672. goto trivially_true;
  5673. }
  5674. // Otherwise swap the operands to put the constant on the right.
  5675. std::swap(LHS, RHS);
  5676. Pred = ICmpInst::getSwappedPredicate(Pred);
  5677. Changed = true;
  5678. }
  5679. // If we're comparing an addrec with a value which is loop-invariant in the
  5680. // addrec's loop, put the addrec on the left. Also make a dominance check,
  5681. // as both operands could be addrecs loop-invariant in each other's loop.
  5682. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
  5683. const Loop *L = AR->getLoop();
  5684. if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
  5685. std::swap(LHS, RHS);
  5686. Pred = ICmpInst::getSwappedPredicate(Pred);
  5687. Changed = true;
  5688. }
  5689. }
  5690. // If there's a constant operand, canonicalize comparisons with boundary
  5691. // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  5692. if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
  5693. const APInt &RA = RC->getValue()->getValue();
  5694. switch (Pred) {
  5695. default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  5696. case ICmpInst::ICMP_EQ:
  5697. case ICmpInst::ICMP_NE:
  5698. // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
  5699. if (!RA)
  5700. if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
  5701. if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
  5702. if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
  5703. ME->getOperand(0)->isAllOnesValue()) {
  5704. RHS = AE->getOperand(1);
  5705. LHS = ME->getOperand(1);
  5706. Changed = true;
  5707. }
  5708. break;
  5709. case ICmpInst::ICMP_UGE:
  5710. if ((RA - 1).isMinValue()) {
  5711. Pred = ICmpInst::ICMP_NE;
  5712. RHS = getConstant(RA - 1);
  5713. Changed = true;
  5714. break;
  5715. }
  5716. if (RA.isMaxValue()) {
  5717. Pred = ICmpInst::ICMP_EQ;
  5718. Changed = true;
  5719. break;
  5720. }
  5721. if (RA.isMinValue()) goto trivially_true;
  5722. Pred = ICmpInst::ICMP_UGT;
  5723. RHS = getConstant(RA - 1);
  5724. Changed = true;
  5725. break;
  5726. case ICmpInst::ICMP_ULE:
  5727. if ((RA + 1).isMaxValue()) {
  5728. Pred = ICmpInst::ICMP_NE;
  5729. RHS = getConstant(RA + 1);
  5730. Changed = true;
  5731. break;
  5732. }
  5733. if (RA.isMinValue()) {
  5734. Pred = ICmpInst::ICMP_EQ;
  5735. Changed = true;
  5736. break;
  5737. }
  5738. if (RA.isMaxValue()) goto trivially_true;
  5739. Pred = ICmpInst::ICMP_ULT;
  5740. RHS = getConstant(RA + 1);
  5741. Changed = true;
  5742. break;
  5743. case ICmpInst::ICMP_SGE:
  5744. if ((RA - 1).isMinSignedValue()) {
  5745. Pred = ICmpInst::ICMP_NE;
  5746. RHS = getConstant(RA - 1);
  5747. Changed = true;
  5748. break;
  5749. }
  5750. if (RA.isMaxSignedValue()) {
  5751. Pred = ICmpInst::ICMP_EQ;
  5752. Changed = true;
  5753. break;
  5754. }
  5755. if (RA.isMinSignedValue()) goto trivially_true;
  5756. Pred = ICmpInst::ICMP_SGT;
  5757. RHS = getConstant(RA - 1);
  5758. Changed = true;
  5759. break;
  5760. case ICmpInst::ICMP_SLE:
  5761. if ((RA + 1).isMaxSignedValue()) {
  5762. Pred = ICmpInst::ICMP_NE;
  5763. RHS = getConstant(RA + 1);
  5764. Changed = true;
  5765. break;
  5766. }
  5767. if (RA.isMinSignedValue()) {
  5768. Pred = ICmpInst::ICMP_EQ;
  5769. Changed = true;
  5770. break;
  5771. }
  5772. if (RA.isMaxSignedValue()) goto trivially_true;
  5773. Pred = ICmpInst::ICMP_SLT;
  5774. RHS = getConstant(RA + 1);
  5775. Changed = true;
  5776. break;
  5777. case ICmpInst::ICMP_UGT:
  5778. if (RA.isMinValue()) {
  5779. Pred = ICmpInst::ICMP_NE;
  5780. Changed = true;
  5781. break;
  5782. }
  5783. if ((RA + 1).isMaxValue()) {
  5784. Pred = ICmpInst::ICMP_EQ;
  5785. RHS = getConstant(RA + 1);
  5786. Changed = true;
  5787. break;
  5788. }
  5789. if (RA.isMaxValue()) goto trivially_false;
  5790. break;
  5791. case ICmpInst::ICMP_ULT:
  5792. if (RA.isMaxValue()) {
  5793. Pred = ICmpInst::ICMP_NE;
  5794. Changed = true;
  5795. break;
  5796. }
  5797. if ((RA - 1).isMinValue()) {
  5798. Pred = ICmpInst::ICMP_EQ;
  5799. RHS = getConstant(RA - 1);
  5800. Changed = true;
  5801. break;
  5802. }
  5803. if (RA.isMinValue()) goto trivially_false;
  5804. break;
  5805. case ICmpInst::ICMP_SGT:
  5806. if (RA.isMinSignedValue()) {
  5807. Pred = ICmpInst::ICMP_NE;
  5808. Changed = true;
  5809. break;
  5810. }
  5811. if ((RA + 1).isMaxSignedValue()) {
  5812. Pred = ICmpInst::ICMP_EQ;
  5813. RHS = getConstant(RA + 1);
  5814. Changed = true;
  5815. break;
  5816. }
  5817. if (RA.isMaxSignedValue()) goto trivially_false;
  5818. break;
  5819. case ICmpInst::ICMP_SLT:
  5820. if (RA.isMaxSignedValue()) {
  5821. Pred = ICmpInst::ICMP_NE;
  5822. Changed = true;
  5823. break;
  5824. }
  5825. if ((RA - 1).isMinSignedValue()) {
  5826. Pred = ICmpInst::ICMP_EQ;
  5827. RHS = getConstant(RA - 1);
  5828. Changed = true;
  5829. break;
  5830. }
  5831. if (RA.isMinSignedValue()) goto trivially_false;
  5832. break;
  5833. }
  5834. }
  5835. // Check for obvious equality.
  5836. if (HasSameValue(LHS, RHS)) {
  5837. if (ICmpInst::isTrueWhenEqual(Pred))
  5838. goto trivially_true;
  5839. if (ICmpInst::isFalseWhenEqual(Pred))
  5840. goto trivially_false;
  5841. }
  5842. // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  5843. // adding or subtracting 1 from one of the operands.
  5844. switch (Pred) {
  5845. case ICmpInst::ICMP_SLE:
  5846. if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
  5847. RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
  5848. SCEV::FlagNSW);
  5849. Pred = ICmpInst::ICMP_SLT;
  5850. Changed = true;
  5851. } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
  5852. LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
  5853. SCEV::FlagNSW);
  5854. Pred = ICmpInst::ICMP_SLT;
  5855. Changed = true;
  5856. }
  5857. break;
  5858. case ICmpInst::ICMP_SGE:
  5859. if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
  5860. RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
  5861. SCEV::FlagNSW);
  5862. Pred = ICmpInst::ICMP_SGT;
  5863. Changed = true;
  5864. } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
  5865. LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
  5866. SCEV::FlagNSW);
  5867. Pred = ICmpInst::ICMP_SGT;
  5868. Changed = true;
  5869. }
  5870. break;
  5871. case ICmpInst::ICMP_ULE:
  5872. if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
  5873. RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
  5874. SCEV::FlagNUW);
  5875. Pred = ICmpInst::ICMP_ULT;
  5876. Changed = true;
  5877. } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
  5878. LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
  5879. SCEV::FlagNUW);
  5880. Pred = ICmpInst::ICMP_ULT;
  5881. Changed = true;
  5882. }
  5883. break;
  5884. case ICmpInst::ICMP_UGE:
  5885. if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
  5886. RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
  5887. SCEV::FlagNUW);
  5888. Pred = ICmpInst::ICMP_UGT;
  5889. Changed = true;
  5890. } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
  5891. LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
  5892. SCEV::FlagNUW);
  5893. Pred = ICmpInst::ICMP_UGT;
  5894. Changed = true;
  5895. }
  5896. break;
  5897. default:
  5898. break;
  5899. }
  5900. // TODO: More simplifications are possible here.
  5901. // Recursively simplify until we either hit a recursion limit or nothing
  5902. // changes.
  5903. if (Changed)
  5904. return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
  5905. return Changed;
  5906. trivially_true:
  5907. // Return 0 == 0.
  5908. LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  5909. Pred = ICmpInst::ICMP_EQ;
  5910. return true;
  5911. trivially_false:
  5912. // Return 0 != 0.
  5913. LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  5914. Pred = ICmpInst::ICMP_NE;
  5915. return true;
  5916. }
  5917. bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  5918. return getSignedRange(S).getSignedMax().isNegative();
  5919. }
  5920. bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  5921. return getSignedRange(S).getSignedMin().isStrictlyPositive();
  5922. }
  5923. bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  5924. return !getSignedRange(S).getSignedMin().isNegative();
  5925. }
  5926. bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  5927. return !getSignedRange(S).getSignedMax().isStrictlyPositive();
  5928. }
  5929. bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  5930. return isKnownNegative(S) || isKnownPositive(S);
  5931. }
  5932. bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
  5933. const SCEV *LHS, const SCEV *RHS) {
  5934. // Canonicalize the inputs first.
  5935. (void)SimplifyICmpOperands(Pred, LHS, RHS);
  5936. // If LHS or RHS is an addrec, check to see if the condition is true in
  5937. // every iteration of the loop.
  5938. // If LHS and RHS are both addrec, both conditions must be true in
  5939. // every iteration of the loop.
  5940. const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  5941. const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  5942. bool LeftGuarded = false;
  5943. bool RightGuarded = false;
  5944. if (LAR) {
  5945. const Loop *L = LAR->getLoop();
  5946. if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
  5947. isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
  5948. if (!RAR) return true;
  5949. LeftGuarded = true;
  5950. }
  5951. }
  5952. if (RAR) {
  5953. const Loop *L = RAR->getLoop();
  5954. if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
  5955. isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
  5956. if (!LAR) return true;
  5957. RightGuarded = true;
  5958. }
  5959. }
  5960. if (LeftGuarded && RightGuarded)
  5961. return true;
  5962. // Otherwise see what can be done with known constant ranges.
  5963. return isKnownPredicateWithRanges(Pred, LHS, RHS);
  5964. }
  5965. bool
  5966. ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
  5967. const SCEV *LHS, const SCEV *RHS) {
  5968. if (HasSameValue(LHS, RHS))
  5969. return ICmpInst::isTrueWhenEqual(Pred);
  5970. // This code is split out from isKnownPredicate because it is called from
  5971. // within isLoopEntryGuardedByCond.
  5972. switch (Pred) {
  5973. default:
  5974. llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  5975. case ICmpInst::ICMP_SGT:
  5976. std::swap(LHS, RHS);
  5977. case ICmpInst::ICMP_SLT: {
  5978. ConstantRange LHSRange = getSignedRange(LHS);
  5979. ConstantRange RHSRange = getSignedRange(RHS);
  5980. if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
  5981. return true;
  5982. if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
  5983. return false;
  5984. break;
  5985. }
  5986. case ICmpInst::ICMP_SGE:
  5987. std::swap(LHS, RHS);
  5988. case ICmpInst::ICMP_SLE: {
  5989. ConstantRange LHSRange = getSignedRange(LHS);
  5990. ConstantRange RHSRange = getSignedRange(RHS);
  5991. if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
  5992. return true;
  5993. if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
  5994. return false;
  5995. break;
  5996. }
  5997. case ICmpInst::ICMP_UGT:
  5998. std::swap(LHS, RHS);
  5999. case ICmpInst::ICMP_ULT: {
  6000. ConstantRange LHSRange = getUnsignedRange(LHS);
  6001. ConstantRange RHSRange = getUnsignedRange(RHS);
  6002. if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
  6003. return true;
  6004. if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
  6005. return false;
  6006. break;
  6007. }
  6008. case ICmpInst::ICMP_UGE:
  6009. std::swap(LHS, RHS);
  6010. case ICmpInst::ICMP_ULE: {
  6011. ConstantRange LHSRange = getUnsignedRange(LHS);
  6012. ConstantRange RHSRange = getUnsignedRange(RHS);
  6013. if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
  6014. return true;
  6015. if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
  6016. return false;
  6017. break;
  6018. }
  6019. case ICmpInst::ICMP_NE: {
  6020. if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
  6021. return true;
  6022. if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
  6023. return true;
  6024. const SCEV *Diff = getMinusSCEV(LHS, RHS);
  6025. if (isKnownNonZero(Diff))
  6026. return true;
  6027. break;
  6028. }
  6029. case ICmpInst::ICMP_EQ:
  6030. // The check at the top of the function catches the case where
  6031. // the values are known to be equal.
  6032. break;
  6033. }
  6034. return false;
  6035. }
  6036. /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
6037. /// protected by a conditional between LHS and RHS. This is used to
6038. /// eliminate casts.
  6039. bool
  6040. ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
  6041. ICmpInst::Predicate Pred,
  6042. const SCEV *LHS, const SCEV *RHS) {
  6043. // Interpret a null as meaning no loop, where there is obviously no guard
  6044. // (interprocedural conditions notwithstanding).
  6045. if (!L) return true;
  6046. if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true;
  6047. BasicBlock *Latch = L->getLoopLatch();
  6048. if (!Latch)
  6049. return false;
  6050. BranchInst *LoopContinuePredicate =
  6051. dyn_cast<BranchInst>(Latch->getTerminator());
  6052. if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
  6053. isImpliedCond(Pred, LHS, RHS,
  6054. LoopContinuePredicate->getCondition(),
  6055. LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
  6056. return true;
  6057. // Check conditions due to any @llvm.assume intrinsics.
  6058. for (auto &AssumeVH : AC->assumptions()) {
  6059. if (!AssumeVH)
  6060. continue;
  6061. auto *CI = cast<CallInst>(AssumeVH);
  6062. if (!DT->dominates(CI, Latch->getTerminator()))
  6063. continue;
  6064. if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
  6065. return true;
  6066. }
  6067. struct ClearWalkingBEDominatingCondsOnExit {
  6068. ScalarEvolution &SE;
  6069. explicit ClearWalkingBEDominatingCondsOnExit(ScalarEvolution &SE)
6070. : SE(SE) {}
  6071. ~ClearWalkingBEDominatingCondsOnExit() {
  6072. SE.WalkingBEDominatingConds = false;
  6073. }
  6074. };
  6075. // We don't want more than one activation of the following loop on the stack
  6076. // -- that can lead to O(n!) time complexity.
  6077. if (WalkingBEDominatingConds)
  6078. return false;
  6079. WalkingBEDominatingConds = true;
  6080. ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this);
  6081. // If the loop is not reachable from the entry block, we risk running into an
  6082. // infinite loop as we walk up into the dom tree. These loops do not matter
  6083. // anyway, so we just return a conservative answer when we see them.
  6084. if (!DT->isReachableFromEntry(L->getHeader()))
  6085. return false;
  6086. for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()];
  6087. DTN != HeaderDTN;
  6088. DTN = DTN->getIDom()) {
  6089. assert(DTN && "should reach the loop header before reaching the root!");
  6090. BasicBlock *BB = DTN->getBlock();
  6091. BasicBlock *PBB = BB->getSinglePredecessor();
  6092. if (!PBB)
  6093. continue;
  6094. BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
  6095. if (!ContinuePredicate || !ContinuePredicate->isConditional())
  6096. continue;
  6097. Value *Condition = ContinuePredicate->getCondition();
  6098. // If we have an edge `E` within the loop body that dominates the only
  6099. // latch, the condition guarding `E` also guards the backedge. This
  6100. // reasoning works only for loops with a single latch.
  6101. BasicBlockEdge DominatingEdge(PBB, BB);
  6102. if (DominatingEdge.isSingleEdge()) {
  6103. // We're constructively (and conservatively) enumerating edges within the
  6104. // loop body that dominate the latch. The dominator tree better agree
  6105. // with us on this:
  6106. assert(DT->dominates(DominatingEdge, Latch) && "should be!");
  6107. if (isImpliedCond(Pred, LHS, RHS, Condition,
  6108. BB != ContinuePredicate->getSuccessor(0)))
  6109. return true;
  6110. }
  6111. }
  6112. return false;
  6113. }
  6114. /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
  6115. /// by a conditional between LHS and RHS. This is used to help avoid max
  6116. /// expressions in loop trip counts, and to eliminate casts.
  6117. bool
  6118. ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
  6119. ICmpInst::Predicate Pred,
  6120. const SCEV *LHS, const SCEV *RHS) {
  6121. // Interpret a null as meaning no loop, where there is obviously no guard
  6122. // (interprocedural conditions notwithstanding).
  6123. if (!L) return false;
  6124. if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true;
  6125. // Starting at the loop predecessor, climb up the predecessor chain, as long
  6126. // as there are predecessors that can be found that have unique successors
  6127. // leading to the original header.
  6128. for (std::pair<BasicBlock *, BasicBlock *>
  6129. Pair(L->getLoopPredecessor(), L->getHeader());
  6130. Pair.first;
  6131. Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
  6132. BranchInst *LoopEntryPredicate =
  6133. dyn_cast<BranchInst>(Pair.first->getTerminator());
  6134. if (!LoopEntryPredicate ||
  6135. LoopEntryPredicate->isUnconditional())
  6136. continue;
  6137. if (isImpliedCond(Pred, LHS, RHS,
  6138. LoopEntryPredicate->getCondition(),
  6139. LoopEntryPredicate->getSuccessor(0) != Pair.second))
  6140. return true;
  6141. }
  6142. // Check conditions due to any @llvm.assume intrinsics.
  6143. for (auto &AssumeVH : AC->assumptions()) {
  6144. if (!AssumeVH)
  6145. continue;
  6146. auto *CI = cast<CallInst>(AssumeVH);
  6147. if (!DT->dominates(CI, L->getHeader()))
  6148. continue;
  6149. if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
  6150. return true;
  6151. }
  6152. return false;
  6153. }
  6154. /// RAII wrapper to prevent recursive application of isImpliedCond.
  6155. /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
  6156. /// currently evaluating isImpliedCond.
  6157. struct MarkPendingLoopPredicate {
  6158. Value *Cond;
  6159. DenseSet<Value*> &LoopPreds;
  6160. bool Pending;
  6161. MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
  6162. : Cond(C), LoopPreds(LP) {
  6163. Pending = !LoopPreds.insert(Cond).second;
  6164. }
  6165. ~MarkPendingLoopPredicate() {
  6166. if (!Pending)
  6167. LoopPreds.erase(Cond);
  6168. }
  6169. };
  6170. /// isImpliedCond - Test whether the condition described by Pred, LHS,
  6171. /// and RHS is true whenever the given Cond value evaluates to true.
  6172. bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
  6173. const SCEV *LHS, const SCEV *RHS,
  6174. Value *FoundCondValue,
  6175. bool Inverse) {
  6176. MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
  6177. if (Mark.Pending)
  6178. return false;
  6179. // Recursively handle And and Or conditions.
  6180. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
  6181. if (BO->getOpcode() == Instruction::And) {
  6182. if (!Inverse)
  6183. return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
  6184. isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
  6185. } else if (BO->getOpcode() == Instruction::Or) {
  6186. if (Inverse)
  6187. return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
  6188. isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
  6189. }
  6190. }
  6191. ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  6192. if (!ICI) return false;
6193. // We have found a conditional branch that dominates the loop or controls
6194. // the loop latch. Check to see if it is the comparison we are looking for.
  6195. ICmpInst::Predicate FoundPred;
  6196. if (Inverse)
  6197. FoundPred = ICI->getInversePredicate();
  6198. else
  6199. FoundPred = ICI->getPredicate();
  6200. const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  6201. const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
  6202. // Balance the types.
  6203. if (getTypeSizeInBits(LHS->getType()) <
  6204. getTypeSizeInBits(FoundLHS->getType())) {
  6205. if (CmpInst::isSigned(Pred)) {
  6206. LHS = getSignExtendExpr(LHS, FoundLHS->getType());
  6207. RHS = getSignExtendExpr(RHS, FoundLHS->getType());
  6208. } else {
  6209. LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
  6210. RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
  6211. }
  6212. } else if (getTypeSizeInBits(LHS->getType()) >
  6213. getTypeSizeInBits(FoundLHS->getType())) {
  6214. if (CmpInst::isSigned(FoundPred)) {
  6215. FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
  6216. FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
  6217. } else {
  6218. FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
  6219. FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
  6220. }
  6221. }
  6222. // Canonicalize the query to match the way instcombine will have
  6223. // canonicalized the comparison.
  6224. if (SimplifyICmpOperands(Pred, LHS, RHS))
  6225. if (LHS == RHS)
  6226. return CmpInst::isTrueWhenEqual(Pred);
  6227. if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
  6228. if (FoundLHS == FoundRHS)
  6229. return CmpInst::isFalseWhenEqual(FoundPred);
  6230. // Check to see if we can make the LHS or RHS match.
  6231. if (LHS == FoundRHS || RHS == FoundLHS) {
  6232. if (isa<SCEVConstant>(RHS)) {
  6233. std::swap(FoundLHS, FoundRHS);
  6234. FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
  6235. } else {
  6236. std::swap(LHS, RHS);
  6237. Pred = ICmpInst::getSwappedPredicate(Pred);
  6238. }
  6239. }
  6240. // Check whether the found predicate is the same as the desired predicate.
  6241. if (FoundPred == Pred)
  6242. return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
  6243. // Check whether swapping the found predicate makes it the same as the
  6244. // desired predicate.
  6245. if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
  6246. if (isa<SCEVConstant>(RHS))
  6247. return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
  6248. else
  6249. return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
  6250. RHS, LHS, FoundLHS, FoundRHS);
  6251. }
  6252. // Check if we can make progress by sharpening ranges.
  6253. if (FoundPred == ICmpInst::ICMP_NE &&
  6254. (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
  6255. const SCEVConstant *C = nullptr;
  6256. const SCEV *V = nullptr;
  6257. if (isa<SCEVConstant>(FoundLHS)) {
  6258. C = cast<SCEVConstant>(FoundLHS);
  6259. V = FoundRHS;
  6260. } else {
  6261. C = cast<SCEVConstant>(FoundRHS);
  6262. V = FoundLHS;
  6263. }
  6264. // The guarding predicate tells us that C != V. If the known range
  6265. // of V is [C, t), we can sharpen the range to [C + 1, t). The
6266. // range we consider has to correspond to the same signedness as the
  6267. // predicate we're interested in folding.
  6268. APInt Min = ICmpInst::isSigned(Pred) ?
  6269. getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin();
  6270. if (Min == C->getValue()->getValue()) {
  6271. // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
  6272. // This is true even if (Min + 1) wraps around -- in case of
  6273. // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
  6274. APInt SharperMin = Min + 1;
  6275. switch (Pred) {
  6276. case ICmpInst::ICMP_SGE:
  6277. case ICmpInst::ICMP_UGE:
  6278. // We know V `Pred` SharperMin. If this implies LHS `Pred`
  6279. // RHS, we're done.
  6280. if (isImpliedCondOperands(Pred, LHS, RHS, V,
  6281. getConstant(SharperMin)))
  6282. return true;
  6283. case ICmpInst::ICMP_SGT:
  6284. case ICmpInst::ICMP_UGT:
  6285. // We know from the range information that (V `Pred` Min ||
  6286. // V == Min). We know from the guarding condition that !(V
  6287. // == Min). This gives us
  6288. //
  6289. // V `Pred` Min || V == Min && !(V == Min)
  6290. // => V `Pred` Min
  6291. //
  6292. // If V `Pred` Min implies LHS `Pred` RHS, we're done.
  6293. if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
  6294. return true;
  6295. default:
  6296. // No change
  6297. break;
  6298. }
  6299. }
  6300. }
6301. // Check whether the found condition is stronger than we actually need.
  6302. if (FoundPred == ICmpInst::ICMP_EQ)
  6303. if (ICmpInst::isTrueWhenEqual(Pred))
  6304. if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
  6305. return true;
  6306. if (Pred == ICmpInst::ICMP_NE)
  6307. if (!ICmpInst::isTrueWhenEqual(FoundPred))
  6308. if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
  6309. return true;
  6310. // Otherwise assume the worst.
  6311. return false;
  6312. }
  6313. /// isImpliedCondOperands - Test whether the condition described by Pred,
  6314. /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
  6315. /// and FoundRHS is true.
  6316. bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
  6317. const SCEV *LHS, const SCEV *RHS,
  6318. const SCEV *FoundLHS,
  6319. const SCEV *FoundRHS) {
  6320. if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
  6321. return true;
  6322. return isImpliedCondOperandsHelper(Pred, LHS, RHS,
  6323. FoundLHS, FoundRHS) ||
  6324. // ~x < ~y --> x > y
  6325. isImpliedCondOperandsHelper(Pred, LHS, RHS,
  6326. getNotSCEV(FoundRHS),
  6327. getNotSCEV(FoundLHS));
  6328. }
6329. /// If Expr computes ~A, return A; otherwise return nullptr.
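/// The match relies on the identity ~A == (-1) + (-1 * A) (note added for
/// exposition): SCEV has no dedicated "not" node, so a bitwise-not appears as
/// an add of the all-ones constant and a multiply by all-ones, which is
/// exactly the shape checked below.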
  6330. static const SCEV *MatchNotExpr(const SCEV *Expr) {
  6331. const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  6332. if (!Add || Add->getNumOperands() != 2) return nullptr;
  6333. const SCEVConstant *AddLHS = dyn_cast<SCEVConstant>(Add->getOperand(0));
  6334. if (!(AddLHS && AddLHS->getValue()->getValue().isAllOnesValue()))
  6335. return nullptr;
  6336. const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  6337. if (!AddRHS || AddRHS->getNumOperands() != 2) return nullptr;
  6338. const SCEVConstant *MulLHS = dyn_cast<SCEVConstant>(AddRHS->getOperand(0));
  6339. if (!(MulLHS && MulLHS->getValue()->getValue().isAllOnesValue()))
  6340. return nullptr;
  6341. return AddRHS->getOperand(1);
  6342. }
  6343. /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
  6344. template<typename MaxExprType>
  6345. static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
  6346. const SCEV *Candidate) {
  6347. const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  6348. if (!MaxExpr) return false;
  6349. auto It = std::find(MaxExpr->op_begin(), MaxExpr->op_end(), Candidate);
  6350. return It != MaxExpr->op_end();
  6351. }
  6352. /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
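/// (Note added for exposition: there is no SMin/UMin SCEV node here, so a min
/// is recognized through the identity min(A, B) == ~max(~A, ~B), i.e. as the
/// "not" of a max of "not"s -- which is what the MatchNotExpr and getNotSCEV
/// calls below check.)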
  6353. template<typename MaxExprType>
  6354. static bool IsMinConsistingOf(ScalarEvolution &SE,
  6355. const SCEV *MaybeMinExpr,
  6356. const SCEV *Candidate) {
  6357. const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  6358. if (!MaybeMaxExpr)
  6359. return false;
  6360. return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
  6361. }
6362. /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
6363. /// expression?
  6364. static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
  6365. ICmpInst::Predicate Pred,
  6366. const SCEV *LHS, const SCEV *RHS) {
  6367. switch (Pred) {
  6368. default:
  6369. return false;
  6370. case ICmpInst::ICMP_SGE:
  6371. std::swap(LHS, RHS);
  6372. // fall through
  6373. case ICmpInst::ICMP_SLE:
  6374. return
  6375. // min(A, ...) <= A
  6376. IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
  6377. // A <= max(A, ...)
  6378. IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
  6379. case ICmpInst::ICMP_UGE:
  6380. std::swap(LHS, RHS);
  6381. // fall through
  6382. case ICmpInst::ICMP_ULE:
  6383. return
  6384. // min(A, ...) <= A
  6385. IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
  6386. // A <= max(A, ...)
  6387. IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  6388. }
  6389. llvm_unreachable("covered switch fell through?!");
  6390. }
  6391. /// isImpliedCondOperandsHelper - Test whether the condition described by
  6392. /// Pred, LHS, and RHS is true whenever the condition described by Pred,
  6393. /// FoundLHS, and FoundRHS is true.
  6394. bool
  6395. ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
  6396. const SCEV *LHS, const SCEV *RHS,
  6397. const SCEV *FoundLHS,
  6398. const SCEV *FoundRHS) {
  6399. auto IsKnownPredicateFull =
  6400. [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  6401. return isKnownPredicateWithRanges(Pred, LHS, RHS) ||
  6402. IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS);
  6403. };
  6404. switch (Pred) {
  6405. default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  6406. case ICmpInst::ICMP_EQ:
  6407. case ICmpInst::ICMP_NE:
  6408. if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
  6409. return true;
  6410. break;
  6411. case ICmpInst::ICMP_SLT:
  6412. case ICmpInst::ICMP_SLE:
  6413. if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
  6414. IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS))
  6415. return true;
  6416. break;
  6417. case ICmpInst::ICMP_SGT:
  6418. case ICmpInst::ICMP_SGE:
  6419. if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
  6420. IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS))
  6421. return true;
  6422. break;
  6423. case ICmpInst::ICMP_ULT:
  6424. case ICmpInst::ICMP_ULE:
  6425. if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
  6426. IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS))
  6427. return true;
  6428. break;
  6429. case ICmpInst::ICMP_UGT:
  6430. case ICmpInst::ICMP_UGE:
  6431. if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
  6432. IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS))
  6433. return true;
  6434. break;
  6435. }
  6436. return false;
  6437. }
  6438. /// isImpliedCondOperandsViaRanges - helper function for isImpliedCondOperands.
6439. /// Tries to prove implications like "X `sgt` 0 => X - 1 `sgt` -1".
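/// A worked instance of the range reasoning below (added for exposition):
/// from "X s> 0" the allowed range of X is [1, SINT_MAX]; if LHS is X + (-1),
/// its range is [0, SINT_MAX - 1], which lies inside the set of values
/// satisfying "LHS s> -1", so the implication holds.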
  6440. bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
  6441. const SCEV *LHS,
  6442. const SCEV *RHS,
  6443. const SCEV *FoundLHS,
  6444. const SCEV *FoundRHS) {
  6445. if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
6446. // The restriction on `FoundRHS` can be lifted easily -- it exists only to
6447. // reduce the compile-time impact of this optimization.
  6448. return false;
  6449. const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS);
  6450. if (!AddLHS || AddLHS->getOperand(1) != FoundLHS ||
  6451. !isa<SCEVConstant>(AddLHS->getOperand(0)))
  6452. return false;
  6453. APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getValue()->getValue();
  6454. // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  6455. // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  6456. ConstantRange FoundLHSRange =
  6457. ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
  6458. // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range
  6459. // for `LHS`:
  6460. APInt Addend =
  6461. cast<SCEVConstant>(AddLHS->getOperand(0))->getValue()->getValue();
  6462. ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend));
  6463. // We can also compute the range of values for `LHS` that satisfy the
  6464. // consequent, "`LHS` `Pred` `RHS`":
  6465. APInt ConstRHS = cast<SCEVConstant>(RHS)->getValue()->getValue();
  6466. ConstantRange SatisfyingLHSRange =
  6467. ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
  6468. // The antecedent implies the consequent if every value of `LHS` that
  6469. // satisfies the antecedent also satisfies the consequent.
  6470. return SatisfyingLHSRange.contains(LHSRange);
  6471. }
6472. // Verify whether a linear IV with a positive stride can overflow in a
6473. // less-than comparison, given the invariant term of the comparison, the
6474. // stride, and the NSW/NUW flags on the recurrence.
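// Example (added for exposition): for an i8 signed IV, if RHS can be as large
// as 120 and Stride as large as 10, then MaxValue - MaxStrideMinusOne =
// 127 - 9 = 118 < 120, so the final increment past RHS could overflow and the
// check below conservatively reports that overflow is possible.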
  6475. bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
  6476. bool IsSigned, bool NoWrap) {
  6477. if (NoWrap) return false;
  6478. unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  6479. const SCEV *One = getConstant(Stride->getType(), 1);
  6480. if (IsSigned) {
  6481. APInt MaxRHS = getSignedRange(RHS).getSignedMax();
  6482. APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
  6483. APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
  6484. .getSignedMax();
  6485. // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
  6486. return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
  6487. }
  6488. APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
  6489. APInt MaxValue = APInt::getMaxValue(BitWidth);
  6490. APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
  6491. .getUnsignedMax();
  6492. // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  6493. return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
  6494. }
6495. // Verify whether a linear IV with a negative stride can overflow in a
6496. // greater-than comparison, given the invariant term of the comparison,
6497. // the stride, and the NSW/NUW flags on the recurrence.
  6498. bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
  6499. bool IsSigned, bool NoWrap) {
  6500. if (NoWrap) return false;
  6501. unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  6502. const SCEV *One = getConstant(Stride->getType(), 1);
  6503. if (IsSigned) {
  6504. APInt MinRHS = getSignedRange(RHS).getSignedMin();
  6505. APInt MinValue = APInt::getSignedMinValue(BitWidth);
  6506. APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
  6507. .getSignedMax();
  6508. // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
  6509. return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
  6510. }
  6511. APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
  6512. APInt MinValue = APInt::getMinValue(BitWidth);
  6513. APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
  6514. .getUnsignedMax();
  6515. // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  6516. return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
  6517. }
  6518. // Compute the backedge taken count knowing the interval difference, the
  6519. // stride and presence of the equality in the comparison.
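// In other words (added for exposition), this is a ceiling division:
// (Delta + Step - 1) /u Step for a strict comparison, or (Delta + Step) /u Step
// when equality is included. E.g. Delta = 10, Step = 3, Equality = false gives
// (10 + 2) /u 3 = 4 backedge iterations.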
  6520. const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
  6521. bool Equality) {
  6522. const SCEV *One = getConstant(Step->getType(), 1);
  6523. Delta = Equality ? getAddExpr(Delta, Step)
  6524. : getAddExpr(Delta, getMinusSCEV(Step, One));
  6525. return getUDivExpr(Delta, Step);
  6526. }
  6527. /// HowManyLessThans - Return the number of times a backedge containing the
  6528. /// specified less-than comparison will execute. If not computable, return
  6529. /// CouldNotCompute.
  6530. ///
6531. /// @param ControlsExit is true when the LHS < RHS condition directly controls
6532. /// the branch (the loop exits only if the condition is true). In this case, we
6533. /// can use NoWrapFlags to skip overflow checks.
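///
/// Illustrative sketch (added for exposition): for the canonical loop
/// "for (i = Start; i < RHS; i += Stride)" with a positive Stride, the count
/// computed below is roughly (max(RHS, Start) - Start + Stride - 1) /u Stride,
/// modulo the guard and no-wrap special cases handled in the body.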
  6534. ScalarEvolution::ExitLimit
  6535. ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
  6536. const Loop *L, bool IsSigned,
  6537. bool ControlsExit) {
  6538. // We handle only IV < Invariant
  6539. if (!isLoopInvariant(RHS, L))
  6540. return getCouldNotCompute();
  6541. const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  6542. // Avoid weird loops
  6543. if (!IV || IV->getLoop() != L || !IV->isAffine())
  6544. return getCouldNotCompute();
  6545. bool NoWrap = ControlsExit &&
  6546. IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
  6547. const SCEV *Stride = IV->getStepRecurrence(*this);
  6548. // Avoid negative or zero stride values
  6549. if (!isKnownPositive(Stride))
  6550. return getCouldNotCompute();
6551. // Avoid proven overflow cases: this will ensure that the backedge-taken count
6552. // will not generate any unsigned overflow. Relaxed no-overflow conditions
6553. // exploit NoWrapFlags, allowing optimization in the presence of undefined
6554. // behavior, as in C.
  6555. if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
  6556. return getCouldNotCompute();
  6557. ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
  6558. : ICmpInst::ICMP_ULT;
  6559. const SCEV *Start = IV->getStart();
  6560. const SCEV *End = RHS;
  6561. if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) {
  6562. const SCEV *Diff = getMinusSCEV(RHS, Start);
  6563. // If we have NoWrap set, then we can assume that the increment won't
  6564. // overflow, in which case if RHS - Start is a constant, we don't need to
  6565. // do a max operation since we can just figure it out statically
  6566. if (NoWrap && isa<SCEVConstant>(Diff)) {
  6567. APInt D = dyn_cast<const SCEVConstant>(Diff)->getValue()->getValue();
  6568. if (D.isNegative())
  6569. End = Start;
  6570. } else
  6571. End = IsSigned ? getSMaxExpr(RHS, Start)
  6572. : getUMaxExpr(RHS, Start);
  6573. }
  6574. const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  6575. APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
  6576. : getUnsignedRange(Start).getUnsignedMin();
  6577. APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
  6578. : getUnsignedRange(Stride).getUnsignedMin();
  6579. unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  6580. APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
  6581. : APInt::getMaxValue(BitWidth) - (MinStride - 1);
6582. // Although End can be a MAX expression, we estimate MaxEnd considering only
  6583. // the case End = RHS. This is safe because in the other case (End - Start)
  6584. // is zero, leading to a zero maximum backedge taken count.
  6585. APInt MaxEnd =
  6586. IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
  6587. : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
  6588. const SCEV *MaxBECount;
  6589. if (isa<SCEVConstant>(BECount))
  6590. MaxBECount = BECount;
  6591. else
  6592. MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
  6593. getConstant(MinStride), false);
  6594. if (isa<SCEVCouldNotCompute>(MaxBECount))
  6595. MaxBECount = BECount;
  6596. return ExitLimit(BECount, MaxBECount);
  6597. }
  6598. ScalarEvolution::ExitLimit
  6599. ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
  6600. const Loop *L, bool IsSigned,
  6601. bool ControlsExit) {
  6602. // We handle only IV > Invariant
  6603. if (!isLoopInvariant(RHS, L))
  6604. return getCouldNotCompute();
  6605. const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  6606. // Avoid weird loops
  6607. if (!IV || IV->getLoop() != L || !IV->isAffine())
  6608. return getCouldNotCompute();
  6609. bool NoWrap = ControlsExit &&
  6610. IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
  6611. const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
  6612. // Avoid negative or zero stride values
  6613. if (!isKnownPositive(Stride))
  6614. return getCouldNotCompute();
  6615. // Avoid proven overflow cases: this will ensure that the backedge taken count
  6616. // will not generate any unsigned overflow. Relaxed no-overflow conditions
6617. // exploit NoWrapFlags, allowing optimization in the presence of undefined
6618. // behavior, as with signed overflow in C.
  6619. if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
  6620. return getCouldNotCompute();
  6621. ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
  6622. : ICmpInst::ICMP_UGT;
  6623. const SCEV *Start = IV->getStart();
  6624. const SCEV *End = RHS;
  6625. if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
  6626. const SCEV *Diff = getMinusSCEV(RHS, Start);
  6627. // If we have NoWrap set, then we can assume that the increment won't
  6628. // overflow, in which case if RHS - Start is a constant, we don't need to
  6629. // do a max operation since we can just figure it out statically
  6630. if (NoWrap && isa<SCEVConstant>(Diff)) {
6631. APInt D = cast<SCEVConstant>(Diff)->getValue()->getValue();
  6632. if (!D.isNegative())
  6633. End = Start;
  6634. } else
  6635. End = IsSigned ? getSMinExpr(RHS, Start)
  6636. : getUMinExpr(RHS, Start);
  6637. }
  6638. const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
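// As in HowManyLessThans above, this is roughly ceil((Start - End) / Stride):
// computeBECount adds the (Stride - 1) rounding term before the unsigned
// division.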
  6639. APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
  6640. : getUnsignedRange(Start).getUnsignedMax();
  6641. APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
  6642. : getUnsignedRange(Stride).getUnsignedMin();
  6643. unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  6644. APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
  6645. : APInt::getMinValue(BitWidth) + (MinStride - 1);
  6646. // Although End can be a MIN expression we estimate MinEnd considering only
  6647. // the case End = RHS. This is safe because in the other case (Start - End)
  6648. // is zero, leading to a zero maximum backedge taken count.
  6649. APInt MinEnd =
  6650. IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
  6651. : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
  6652. const SCEV *MaxBECount = getCouldNotCompute();
  6653. if (isa<SCEVConstant>(BECount))
  6654. MaxBECount = BECount;
  6655. else
  6656. MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
  6657. getConstant(MinStride), false);
  6658. if (isa<SCEVCouldNotCompute>(MaxBECount))
  6659. MaxBECount = BECount;
  6660. return ExitLimit(BECount, MaxBECount);
  6661. }
  6662. /// getNumIterationsInRange - Return the number of iterations of this loop that
  6663. /// produce values in the specified constant range. Another way of looking at
  6664. /// this is that it returns the first iteration number where the value is not in
6665. /// the range, thus computing the exit count. If the iteration count can't
  6666. /// be computed, an instance of SCEVCouldNotCompute is returned.
  6667. const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
  6668. ScalarEvolution &SE) const {
  6669. if (Range.isFullSet()) // Infinite loop.
  6670. return SE.getCouldNotCompute();
  6671. // If the start is a non-zero constant, shift the range to simplify things.
  6672. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
  6673. if (!SC->getValue()->isZero()) {
  6674. SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
  6675. Operands[0] = SE.getConstant(SC->getType(), 0);
  6676. const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
  6677. getNoWrapFlags(FlagNW));
  6678. if (const SCEVAddRecExpr *ShiftedAddRec =
  6679. dyn_cast<SCEVAddRecExpr>(Shifted))
  6680. return ShiftedAddRec->getNumIterationsInRange(
  6681. Range.subtract(SC->getValue()->getValue()), SE);
  6682. // This is strange and shouldn't happen.
  6683. return SE.getCouldNotCompute();
  6684. }
  6685. // The only time we can solve this is when we have all constant indices.
  6686. // Otherwise, we cannot determine the overflow conditions.
  6687. for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
  6688. if (!isa<SCEVConstant>(getOperand(i)))
  6689. return SE.getCouldNotCompute();
  6690. // Okay at this point we know that all elements of the chrec are constants and
  6691. // that the start element is zero.
  6692. // First check to see if the range contains zero. If not, the first
  6693. // iteration exits.
  6694. unsigned BitWidth = SE.getTypeSizeInBits(getType());
  6695. if (!Range.contains(APInt(BitWidth, 0)))
  6696. return SE.getConstant(getType(), 0);
  6697. if (isAffine()) {
  6698. // If this is an affine expression then we have this situation:
  6699. // Solve {0,+,A} in Range === Ax in Range
  6700. // We know that zero is in the range. If A is positive then we know that
  6701. // the upper value of the range must be the first possible exit value.
  6702. // If A is negative then the lower of the range is the last possible loop
  6703. // value. Also note that we already checked for a full range.
  6704. APInt One(BitWidth,1);
  6705. APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
  6706. APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
  6707. // The exit value should be (End+A)/A.
  6708. APInt ExitVal = (End + A).udiv(A);
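// Worked example: for {0,+,3} with Range = [0, 10), A = 3 and
// End = Range.getUpper() - 1 = 9, so ExitVal = (9 + 3) / 3 = 4; iteration 4
// evaluates to 12 (outside the range) while iteration 3 evaluates to 9
// (still inside), so 4 is the iteration count returned below.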
  6709. ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
  6710. // Evaluate at the exit value. If we really did fall out of the valid
  6711. // range, then we computed our trip count, otherwise wrap around or other
  6712. // things must have happened.
  6713. ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
  6714. if (Range.contains(Val->getValue()))
  6715. return SE.getCouldNotCompute(); // Something strange happened
  6716. // Ensure that the previous value is in the range. This is a sanity check.
  6717. assert(Range.contains(
  6718. EvaluateConstantChrecAtConstant(this,
  6719. ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
  6720. "Linear scev computation is off in a bad way!");
  6721. return SE.getConstant(ExitValue);
  6722. } else if (isQuadratic()) {
  6723. // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
  6724. // quadratic equation to solve it. To do this, we must frame our problem in
  6725. // terms of figuring out when zero is crossed, instead of when
  6726. // Range.getUpper() is crossed.
  6727. SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
  6728. NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
  6729. const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
  6730. // getNoWrapFlags(FlagNW)
  6731. FlagAnyWrap);
  6732. // Next, solve the constructed addrec
  6733. std::pair<const SCEV *,const SCEV *> Roots =
  6734. SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
  6735. const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
  6736. const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
  6737. if (R1) {
  6738. // Pick the smallest positive root value.
  6739. if (ConstantInt *CB =
  6740. dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
  6741. R1->getValue(), R2->getValue()))) {
  6742. if (!CB->getZExtValue())
  6743. std::swap(R1, R2); // R1 is the minimum root now.
  6744. // Make sure the root is not off by one. The returned iteration should
  6745. // not be in the range, but the previous one should be. When solving
  6746. // for "X*X < 5", for example, we should not return a root of 2.
  6747. ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
  6748. R1->getValue(),
  6749. SE);
  6750. if (Range.contains(R1Val->getValue())) {
  6751. // The next iteration must be out of the range...
  6752. ConstantInt *NextVal =
  6753. ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
  6754. R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
  6755. if (!Range.contains(R1Val->getValue()))
  6756. return SE.getConstant(NextVal);
  6757. return SE.getCouldNotCompute(); // Something strange happened
  6758. }
  6759. // If R1 was not in the range, then it is a good return value. Make
  6760. // sure that R1-1 WAS in the range though, just in case.
  6761. ConstantInt *NextVal =
  6762. ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
  6763. R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
  6764. if (Range.contains(R1Val->getValue()))
  6765. return R1;
  6766. return SE.getCouldNotCompute(); // Something strange happened
  6767. }
  6768. }
  6769. }
  6770. return SE.getCouldNotCompute();
  6771. }
  6772. namespace {
  6773. struct FindUndefs {
  6774. bool Found;
  6775. FindUndefs() : Found(false) {}
  6776. bool follow(const SCEV *S) {
  6777. if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) {
  6778. if (isa<UndefValue>(C->getValue()))
  6779. Found = true;
  6780. } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
  6781. if (isa<UndefValue>(C->getValue()))
  6782. Found = true;
  6783. }
  6784. // Keep looking if we haven't found it yet.
  6785. return !Found;
  6786. }
  6787. bool isDone() const {
  6788. // Stop recursion if we have found an undef.
  6789. return Found;
  6790. }
  6791. };
  6792. }
6793. // Returns true when S contains at least one undef value.
  6794. static inline bool
  6795. containsUndefs(const SCEV *S) {
  6796. FindUndefs F;
  6797. SCEVTraversal<FindUndefs> ST(F);
  6798. ST.visitAll(S);
  6799. return F.Found;
  6800. }
  6801. namespace {
  6802. // Collect all steps of SCEV expressions.
  6803. struct SCEVCollectStrides {
  6804. ScalarEvolution &SE;
  6805. SmallVectorImpl<const SCEV *> &Strides;
  6806. SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
  6807. : SE(SE), Strides(S) {}
  6808. bool follow(const SCEV *S) {
  6809. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
  6810. Strides.push_back(AR->getStepRecurrence(SE));
  6811. return true;
  6812. }
  6813. bool isDone() const { return false; }
  6814. };
  6815. // Collect all SCEVUnknown and SCEVMulExpr expressions.
  6816. struct SCEVCollectTerms {
  6817. SmallVectorImpl<const SCEV *> &Terms;
  6818. SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
  6819. : Terms(T) {}
  6820. bool follow(const SCEV *S) {
  6821. if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) {
  6822. if (!containsUndefs(S))
  6823. Terms.push_back(S);
  6824. // Stop recursion: once we collected a term, do not walk its operands.
  6825. return false;
  6826. }
  6827. // Keep looking.
  6828. return true;
  6829. }
  6830. bool isDone() const { return false; }
  6831. };
  6832. }
6833. /// Find parametric terms in the given SCEV expression.
  6834. void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
  6835. SmallVectorImpl<const SCEV *> &Terms) {
  6836. SmallVector<const SCEV *, 4> Strides;
  6837. SCEVCollectStrides StrideCollector(*this, Strides);
  6838. visitAll(Expr, StrideCollector);
  6839. DEBUG({
  6840. dbgs() << "Strides:\n";
  6841. for (const SCEV *S : Strides)
  6842. dbgs() << *S << "\n";
  6843. });
  6844. for (const SCEV *S : Strides) {
  6845. SCEVCollectTerms TermCollector(Terms);
  6846. visitAll(S, TermCollector);
  6847. }
  6848. DEBUG({
  6849. dbgs() << "Terms:\n";
  6850. for (const SCEV *T : Terms)
  6851. dbgs() << *T << "\n";
  6852. });
  6853. }
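// For example, given the access analyzed in the delinearize documentation
// below, {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>,
// the collected Strides are (8 * %m * %o), (8 * %o) and 8, and Terms ends up
// holding the two non-constant products (8 * %m * %o) and (8 * %o).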
  6854. static bool findArrayDimensionsRec(ScalarEvolution &SE,
  6855. SmallVectorImpl<const SCEV *> &Terms,
  6856. SmallVectorImpl<const SCEV *> &Sizes) {
  6857. int Last = Terms.size() - 1;
  6858. const SCEV *Step = Terms[Last];
  6859. // End of recursion.
  6860. if (Last == 0) {
  6861. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
  6862. SmallVector<const SCEV *, 2> Qs;
  6863. for (const SCEV *Op : M->operands())
  6864. if (!isa<SCEVConstant>(Op))
  6865. Qs.push_back(Op);
  6866. Step = SE.getMulExpr(Qs);
  6867. }
  6868. Sizes.push_back(Step);
  6869. return true;
  6870. }
  6871. for (const SCEV *&Term : Terms) {
  6872. // Normalize the terms before the next call to findArrayDimensionsRec.
  6873. const SCEV *Q, *R;
  6874. SCEVDivision::divide(SE, Term, Step, &Q, &R);
  6875. // Bail out when GCD does not evenly divide one of the terms.
  6876. if (!R->isZero())
  6877. return false;
  6878. Term = Q;
  6879. }
  6880. // Remove all SCEVConstants.
  6881. Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) {
  6882. return isa<SCEVConstant>(E);
  6883. }),
  6884. Terms.end());
  6885. if (Terms.size() > 0)
  6886. if (!findArrayDimensionsRec(SE, Terms, Sizes))
  6887. return false;
  6888. Sizes.push_back(Step);
  6889. return true;
  6890. }
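// Informally, continuing the example above: once findArrayDimensions (below)
// has divided the terms by the element size, the remaining terms are
// (%m * %o) and %o, sorted larger first. The recursion picks Step = %o,
// divides the other term by it to get %m, recurses to push %m, then pushes
// %o, so Sizes becomes {%m, %o} before the caller appends the element size.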
  6891. namespace {
  6892. struct FindParameter {
  6893. bool FoundParameter;
  6894. FindParameter() : FoundParameter(false) {}
  6895. bool follow(const SCEV *S) {
  6896. if (isa<SCEVUnknown>(S)) {
  6897. FoundParameter = true;
  6898. // Stop recursion: we found a parameter.
  6899. return false;
  6900. }
  6901. // Keep looking.
  6902. return true;
  6903. }
  6904. bool isDone() const {
  6905. // Stop recursion if we have found a parameter.
  6906. return FoundParameter;
  6907. }
  6908. };
  6909. }
6910. // Returns true when S contains at least one SCEVUnknown parameter.
  6911. static inline bool
  6912. containsParameters(const SCEV *S) {
  6913. FindParameter F;
  6914. SCEVTraversal<FindParameter> ST(F);
  6915. ST.visitAll(S);
  6916. return F.FoundParameter;
  6917. }
6918. // Returns true when any of the SCEVs in Terms contains a SCEVUnknown parameter.
  6919. static inline bool
  6920. containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  6921. for (const SCEV *T : Terms)
  6922. if (containsParameters(T))
  6923. return true;
  6924. return false;
  6925. }
  6926. // Return the number of product terms in S.
  6927. static inline int numberOfTerms(const SCEV *S) {
  6928. if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
  6929. return Expr->getNumOperands();
  6930. return 1;
  6931. }
  6932. static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  6933. if (isa<SCEVConstant>(T))
  6934. return nullptr;
  6935. if (isa<SCEVUnknown>(T))
  6936. return T;
  6937. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
  6938. SmallVector<const SCEV *, 2> Factors;
  6939. for (const SCEV *Op : M->operands())
  6940. if (!isa<SCEVConstant>(Op))
  6941. Factors.push_back(Op);
  6942. return SE.getMulExpr(Factors);
  6943. }
  6944. return T;
  6945. }
  6946. /// Return the size of an element read or written by Inst.
  6947. const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  6948. Type *Ty;
  6949. if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
  6950. Ty = Store->getValueOperand()->getType();
  6951. else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
  6952. Ty = Load->getType();
  6953. else
  6954. return nullptr;
  6955. Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  6956. return getSizeOfExpr(ETy, Ty);
  6957. }
  6958. /// Second step of delinearization: compute the array dimensions Sizes from the
6959. /// set of Terms extracted from the memory access function of the analyzed SCEV.
  6960. void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
  6961. SmallVectorImpl<const SCEV *> &Sizes,
  6962. const SCEV *ElementSize) const {
  6963. if (Terms.size() < 1 || !ElementSize)
  6964. return;
  6965. // Early return when Terms do not contain parameters: we do not delinearize
6966. // non-parametric SCEVs.
  6967. if (!containsParameters(Terms))
  6968. return;
  6969. DEBUG({
  6970. dbgs() << "Terms:\n";
  6971. for (const SCEV *T : Terms)
  6972. dbgs() << *T << "\n";
  6973. });
  6974. // Remove duplicates.
  6975. std::sort(Terms.begin(), Terms.end());
  6976. Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
  6977. // Put larger terms first.
  6978. std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
  6979. return numberOfTerms(LHS) > numberOfTerms(RHS);
  6980. });
  6981. ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  6982. // Divide all terms by the element size.
  6983. for (const SCEV *&Term : Terms) {
  6984. const SCEV *Q, *R;
  6985. SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
  6986. Term = Q;
  6987. }
  6988. SmallVector<const SCEV *, 4> NewTerms;
  6989. // Remove constant factors.
  6990. for (const SCEV *T : Terms)
  6991. if (const SCEV *NewT = removeConstantFactors(SE, T))
  6992. NewTerms.push_back(NewT);
  6993. DEBUG({
  6994. dbgs() << "Terms after sorting:\n";
  6995. for (const SCEV *T : NewTerms)
  6996. dbgs() << *T << "\n";
  6997. });
  6998. if (NewTerms.empty() ||
  6999. !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
  7000. Sizes.clear();
  7001. return;
  7002. }
  7003. // The last element to be pushed into Sizes is the size of an element.
  7004. Sizes.push_back(ElementSize);
  7005. DEBUG({
  7006. dbgs() << "Sizes:\n";
  7007. for (const SCEV *S : Sizes)
  7008. dbgs() << *S << "\n";
  7009. });
  7010. }
  7011. /// Third step of delinearization: compute the access functions for the
  7012. /// Subscripts based on the dimensions in Sizes.
  7013. void ScalarEvolution::computeAccessFunctions(
  7014. const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
  7015. SmallVectorImpl<const SCEV *> &Sizes) {
  7016. // Early exit in case this SCEV is not an affine multivariate function.
  7017. if (Sizes.empty())
  7018. return;
  7019. if (auto AR = dyn_cast<SCEVAddRecExpr>(Expr))
  7020. if (!AR->isAffine())
  7021. return;
  7022. const SCEV *Res = Expr;
  7023. int Last = Sizes.size() - 1;
  7024. for (int i = Last; i >= 0; i--) {
  7025. const SCEV *Q, *R;
  7026. SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
  7027. DEBUG({
  7028. dbgs() << "Res: " << *Res << "\n";
  7029. dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
  7030. dbgs() << "Res divided by Sizes[i]:\n";
  7031. dbgs() << "Quotient: " << *Q << "\n";
  7032. dbgs() << "Remainder: " << *R << "\n";
  7033. });
  7034. Res = Q;
  7035. // Do not record the last subscript corresponding to the size of elements in
  7036. // the array.
  7037. if (i == Last) {
  7038. // Bail out if the remainder is too complex.
  7039. if (isa<SCEVAddRecExpr>(R)) {
  7040. Subscripts.clear();
  7041. Sizes.clear();
  7042. return;
  7043. }
  7044. continue;
  7045. }
  7046. // Record the access function for the current subscript.
  7047. Subscripts.push_back(R);
  7048. }
  7049. // Also push in last position the remainder of the last division: it will be
  7050. // the access function of the innermost dimension.
  7051. Subscripts.push_back(Res);
  7052. std::reverse(Subscripts.begin(), Subscripts.end());
  7053. DEBUG({
  7054. dbgs() << "Subscripts:\n";
  7055. for (const SCEV *S : Subscripts)
  7056. dbgs() << *S << "\n";
  7057. });
  7058. }
7059. /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
7060. /// sizes of an array access. The SCEV->delinearize algorithm computes the
7061. /// multiples of the SCEV coefficients: that is, a pattern match over
7062. /// sub-expressions in the stride and base of a SCEV, corresponding to the
7063. /// computation of a GCD (greatest common divisor) of base and stride. When
7064. /// SCEV->delinearize fails, it leaves the Subscripts and Sizes vectors
7065. /// empty.
  7066. ///
  7067. /// For example: when analyzing the memory access A[i][j][k] in this loop nest
  7068. ///
  7069. /// void foo(long n, long m, long o, double A[n][m][o]) {
  7070. ///
  7071. /// for (long i = 0; i < n; i++)
  7072. /// for (long j = 0; j < m; j++)
  7073. /// for (long k = 0; k < o; k++)
  7074. /// A[i][j][k] = 1.0;
  7075. /// }
  7076. ///
  7077. /// the delinearization input is the following AddRec SCEV:
  7078. ///
  7079. /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
  7080. ///
  7081. /// From this SCEV, we are able to say that the base offset of the access is %A
  7082. /// because it appears as an offset that does not divide any of the strides in
  7083. /// the loops:
  7084. ///
  7085. /// CHECK: Base offset: %A
  7086. ///
  7087. /// and then SCEV->delinearize determines the size of some of the dimensions of
  7088. /// the array as these are the multiples by which the strides are happening:
  7089. ///
  7090. /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
  7091. ///
  7092. /// Note that the outermost dimension remains of UnknownSize because there are
7093. /// no strides that would help identify the size of that dimension: when
  7094. /// the array has been statically allocated, one could compute the size of that
  7095. /// dimension by dividing the overall size of the array by the size of the known
  7096. /// dimensions: %m * %o * 8.
  7097. ///
  7098. /// Finally delinearize provides the access functions for the array reference
7099. /// that corresponds to A[i][j][k] in the above C testcase:
  7100. ///
  7101. /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
  7102. ///
  7103. /// The testcases are checking the output of a function pass:
  7104. /// DelinearizationPass that walks through all loads and stores of a function
  7105. /// asking for the SCEV of the memory access with respect to all enclosing
  7106. /// loops, calling SCEV->delinearize on that and printing the results.
  7107. void ScalarEvolution::delinearize(const SCEV *Expr,
  7108. SmallVectorImpl<const SCEV *> &Subscripts,
  7109. SmallVectorImpl<const SCEV *> &Sizes,
  7110. const SCEV *ElementSize) {
  7111. // First step: collect parametric terms.
  7112. SmallVector<const SCEV *, 4> Terms;
  7113. collectParametricTerms(Expr, Terms);
  7114. if (Terms.empty())
  7115. return;
  7116. // Second step: find subscript sizes.
  7117. findArrayDimensions(Terms, Sizes, ElementSize);
  7118. if (Sizes.empty())
  7119. return;
  7120. // Third step: compute the access functions for each subscript.
  7121. computeAccessFunctions(Expr, Subscripts, Sizes);
  7122. if (Subscripts.empty())
  7123. return;
  7124. DEBUG({
  7125. dbgs() << "succeeded to delinearize " << *Expr << "\n";
  7126. dbgs() << "ArrayDecl[UnknownSize]";
  7127. for (const SCEV *S : Sizes)
  7128. dbgs() << "[" << *S << "]";
  7129. dbgs() << "\nArrayRef";
  7130. for (const SCEV *S : Subscripts)
  7131. dbgs() << "[" << *S << "]";
  7132. dbgs() << "\n";
  7133. });
  7134. }
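// A minimal client sketch (hypothetical; SE, the address-computing GEP and
// the load/store instruction Inst are assumed to be in scope):
//   SmallVector<const SCEV *, 3> Subscripts, Sizes;
//   const SCEV *AccessFn = SE.getSCEV(GEP);
//   SE.delinearize(AccessFn, Subscripts, Sizes, SE.getElementSize(Inst));
//   if (!Subscripts.empty())
//     ... // Subscripts[i] is the access function of dimension i.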
  7135. //===----------------------------------------------------------------------===//
  7136. // SCEVCallbackVH Class Implementation
  7137. //===----------------------------------------------------------------------===//
  7138. void ScalarEvolution::SCEVCallbackVH::deleted() {
  7139. assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  7140. if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
  7141. SE->ConstantEvolutionLoopExitValue.erase(PN);
  7142. SE->ValueExprMap.erase(getValPtr());
  7143. // this now dangles!
  7144. }
  7145. void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  7146. assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  7147. // Forget all the expressions associated with users of the old value,
  7148. // so that future queries will recompute the expressions using the new
  7149. // value.
  7150. Value *Old = getValPtr();
  7151. SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  7152. SmallPtrSet<User *, 8> Visited;
  7153. while (!Worklist.empty()) {
  7154. User *U = Worklist.pop_back_val();
  7155. // Deleting the Old value will cause this to dangle. Postpone
  7156. // that until everything else is done.
  7157. if (U == Old)
  7158. continue;
  7159. if (!Visited.insert(U).second)
  7160. continue;
  7161. if (PHINode *PN = dyn_cast<PHINode>(U))
  7162. SE->ConstantEvolutionLoopExitValue.erase(PN);
  7163. SE->ValueExprMap.erase(U);
  7164. Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  7165. }
  7166. // Delete the Old value.
  7167. if (PHINode *PN = dyn_cast<PHINode>(Old))
  7168. SE->ConstantEvolutionLoopExitValue.erase(PN);
  7169. SE->ValueExprMap.erase(Old);
  7170. // this now dangles!
  7171. }
  7172. ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  7173. : CallbackVH(V), SE(se) {}
  7174. //===----------------------------------------------------------------------===//
  7175. // ScalarEvolution Class Implementation
  7176. //===----------------------------------------------------------------------===//
  7177. ScalarEvolution::ScalarEvolution()
  7178. : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64),
  7179. LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) {
  7180. initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
  7181. }
  7182. bool ScalarEvolution::runOnFunction(Function &F) {
  7183. this->F = &F;
  7184. AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  7185. LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  7186. TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  7187. DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  7188. return false;
  7189. }
  7190. void ScalarEvolution::releaseMemory() {
  7191. // Iterate through all the SCEVUnknown instances and call their
  7192. // destructors, so that they release their references to their values.
  7193. for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
  7194. U->~SCEVUnknown();
  7195. FirstUnknown = nullptr;
  7196. ValueExprMap.clear();
  7197. // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  7198. // that a loop had multiple computable exits.
  7199. for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
  7200. BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
  7201. I != E; ++I) {
  7202. I->second.clear();
  7203. }
  7204. assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  7205. assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  7206. BackedgeTakenCounts.clear();
  7207. ConstantEvolutionLoopExitValue.clear();
  7208. ValuesAtScopes.clear();
  7209. LoopDispositions.clear();
  7210. BlockDispositions.clear();
  7211. UnsignedRanges.clear();
  7212. SignedRanges.clear();
  7213. UniqueSCEVs.clear();
  7214. SCEVAllocator.Reset();
  7215. }
  7216. void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  7217. AU.setPreservesAll();
  7218. AU.addRequired<AssumptionCacheTracker>();
  7219. AU.addRequiredTransitive<LoopInfoWrapperPass>();
  7220. AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  7221. AU.addRequired<TargetLibraryInfoWrapperPass>();
  7222. AU.addRequired<DxilValueCache>(); // HLSL Change
  7223. }
  7224. bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  7225. return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
  7226. }
  7227. static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
  7228. const Loop *L) {
  7229. // Print all inner loops first
  7230. for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
  7231. PrintLoopInfo(OS, SE, *I);
  7232. OS << "Loop ";
  7233. L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  7234. OS << ": ";
  7235. SmallVector<BasicBlock *, 8> ExitBlocks;
  7236. L->getExitBlocks(ExitBlocks);
  7237. if (ExitBlocks.size() != 1)
  7238. OS << "<multiple exits> ";
  7239. if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
  7240. OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  7241. } else {
  7242. OS << "Unpredictable backedge-taken count. ";
  7243. }
  7244. OS << "\n"
  7245. "Loop ";
  7246. L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  7247. OS << ": ";
  7248. if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
  7249. OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  7250. } else {
  7251. OS << "Unpredictable max backedge-taken count. ";
  7252. }
  7253. OS << "\n";
  7254. }
  7255. void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  7256. // ScalarEvolution's implementation of the print method is to print
  7257. // out SCEV values of all instructions that are interesting. Doing
  7258. // this potentially causes it to create new SCEV objects though,
  7259. // which technically conflicts with the const qualifier. This isn't
  7260. // observable from outside the class though, so casting away the
  7261. // const isn't dangerous.
  7262. ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  7263. OS << "Classifying expressions for: ";
  7264. F->printAsOperand(OS, /*PrintType=*/false);
  7265. OS << "\n";
  7266. for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
  7267. if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
  7268. OS << *I << '\n';
  7269. OS << " --> ";
  7270. const SCEV *SV = SE.getSCEV(&*I);
  7271. SV->print(OS);
  7272. if (!isa<SCEVCouldNotCompute>(SV)) {
  7273. OS << " U: ";
  7274. SE.getUnsignedRange(SV).print(OS);
  7275. OS << " S: ";
  7276. SE.getSignedRange(SV).print(OS);
  7277. }
  7278. const Loop *L = LI->getLoopFor((*I).getParent());
  7279. const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
  7280. if (AtUse != SV) {
  7281. OS << " --> ";
  7282. AtUse->print(OS);
  7283. if (!isa<SCEVCouldNotCompute>(AtUse)) {
  7284. OS << " U: ";
  7285. SE.getUnsignedRange(AtUse).print(OS);
  7286. OS << " S: ";
  7287. SE.getSignedRange(AtUse).print(OS);
  7288. }
  7289. }
  7290. if (L) {
  7291. OS << "\t\t" "Exits: ";
  7292. const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
  7293. if (!SE.isLoopInvariant(ExitValue, L)) {
  7294. OS << "<<Unknown>>";
  7295. } else {
  7296. OS << *ExitValue;
  7297. }
  7298. }
  7299. OS << "\n";
  7300. }
  7301. OS << "Determining loop execution counts for: ";
  7302. F->printAsOperand(OS, /*PrintType=*/false);
  7303. OS << "\n";
  7304. for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
  7305. PrintLoopInfo(OS, &SE, *I);
  7306. }
  7307. ScalarEvolution::LoopDisposition
  7308. ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  7309. auto &Values = LoopDispositions[S];
  7310. for (auto &V : Values) {
  7311. if (V.getPointer() == L)
  7312. return V.getInt();
  7313. }
  7314. Values.emplace_back(L, LoopVariant);
  7315. LoopDisposition D = computeLoopDisposition(S, L);
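// computeLoopDisposition may recurse back into getLoopDisposition and grow
// LoopDispositions, which can invalidate the Values reference taken above;
// re-acquire the bucket before updating the memoized entry.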
  7316. auto &Values2 = LoopDispositions[S];
  7317. for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
  7318. if (V.getPointer() == L) {
  7319. V.setInt(D);
  7320. break;
  7321. }
  7322. }
  7323. return D;
  7324. }
  7325. ScalarEvolution::LoopDisposition
  7326. ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  7327. switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  7328. case scConstant:
  7329. return LoopInvariant;
  7330. case scTruncate:
  7331. case scZeroExtend:
  7332. case scSignExtend:
  7333. return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  7334. case scAddRecExpr: {
  7335. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  7336. // If L is the addrec's loop, it's computable.
  7337. if (AR->getLoop() == L)
  7338. return LoopComputable;
  7339. // Add recurrences are never invariant in the function-body (null loop).
  7340. if (!L)
  7341. return LoopVariant;
  7342. // This recurrence is variant w.r.t. L if L contains AR's loop.
  7343. if (L->contains(AR->getLoop()))
  7344. return LoopVariant;
  7345. // This recurrence is invariant w.r.t. L if AR's loop contains L.
  7346. if (AR->getLoop()->contains(L))
  7347. return LoopInvariant;
  7348. // This recurrence is variant w.r.t. L if any of its operands
  7349. // are variant.
  7350. for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
  7351. I != E; ++I)
  7352. if (!isLoopInvariant(*I, L))
  7353. return LoopVariant;
  7354. // Otherwise it's loop-invariant.
  7355. return LoopInvariant;
  7356. }
  7357. case scAddExpr:
  7358. case scMulExpr:
  7359. case scUMaxExpr:
  7360. case scSMaxExpr: {
  7361. const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
  7362. bool HasVarying = false;
  7363. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  7364. I != E; ++I) {
  7365. LoopDisposition D = getLoopDisposition(*I, L);
  7366. if (D == LoopVariant)
  7367. return LoopVariant;
  7368. if (D == LoopComputable)
  7369. HasVarying = true;
  7370. }
  7371. return HasVarying ? LoopComputable : LoopInvariant;
  7372. }
  7373. case scUDivExpr: {
  7374. const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
  7375. LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
  7376. if (LD == LoopVariant)
  7377. return LoopVariant;
  7378. LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
  7379. if (RD == LoopVariant)
  7380. return LoopVariant;
  7381. return (LD == LoopInvariant && RD == LoopInvariant) ?
  7382. LoopInvariant : LoopComputable;
  7383. }
  7384. case scUnknown:
  7385. // All non-instruction values are loop invariant. All instructions are loop
  7386. // invariant if they are not contained in the specified loop.
  7387. // Instructions are never considered invariant in the function body
  7388. // (null loop) because they are defined within the "loop".
  7389. if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
  7390. return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
  7391. return LoopInvariant;
  7392. case scCouldNotCompute:
  7393. llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  7394. }
  7395. llvm_unreachable("Unknown SCEV kind!");
  7396. }
  7397. bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  7398. return getLoopDisposition(S, L) == LoopInvariant;
  7399. }
  7400. bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  7401. return getLoopDisposition(S, L) == LoopComputable;
  7402. }
  7403. ScalarEvolution::BlockDisposition
  7404. ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  7405. auto &Values = BlockDispositions[S];
  7406. for (auto &V : Values) {
  7407. if (V.getPointer() == BB)
  7408. return V.getInt();
  7409. }
  7410. Values.emplace_back(BB, DoesNotDominateBlock);
  7411. BlockDisposition D = computeBlockDisposition(S, BB);
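// As in getLoopDisposition, computeBlockDisposition may have grown
// BlockDispositions and invalidated Values, so look the bucket up again.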
  7412. auto &Values2 = BlockDispositions[S];
  7413. for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
  7414. if (V.getPointer() == BB) {
  7415. V.setInt(D);
  7416. break;
  7417. }
  7418. }
  7419. return D;
  7420. }
  7421. ScalarEvolution::BlockDisposition
  7422. ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  7423. switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  7424. case scConstant:
  7425. return ProperlyDominatesBlock;
  7426. case scTruncate:
  7427. case scZeroExtend:
  7428. case scSignExtend:
  7429. return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  7430. case scAddRecExpr: {
  7431. // This uses a "dominates" query instead of "properly dominates" query
  7432. // to test for proper dominance too, because the instruction which
  7433. // produces the addrec's value is a PHI, and a PHI effectively properly
  7434. // dominates its entire containing block.
  7435. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  7436. if (!DT->dominates(AR->getLoop()->getHeader(), BB))
  7437. return DoesNotDominateBlock;
  7438. }
  7439. // FALL THROUGH into SCEVNAryExpr handling.
  7440. case scAddExpr:
  7441. case scMulExpr:
  7442. case scUMaxExpr:
  7443. case scSMaxExpr: {
  7444. const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
  7445. bool Proper = true;
  7446. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  7447. I != E; ++I) {
  7448. BlockDisposition D = getBlockDisposition(*I, BB);
  7449. if (D == DoesNotDominateBlock)
  7450. return DoesNotDominateBlock;
  7451. if (D == DominatesBlock)
  7452. Proper = false;
  7453. }
  7454. return Proper ? ProperlyDominatesBlock : DominatesBlock;
  7455. }
  7456. case scUDivExpr: {
  7457. const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
  7458. const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
  7459. BlockDisposition LD = getBlockDisposition(LHS, BB);
  7460. if (LD == DoesNotDominateBlock)
  7461. return DoesNotDominateBlock;
  7462. BlockDisposition RD = getBlockDisposition(RHS, BB);
  7463. if (RD == DoesNotDominateBlock)
  7464. return DoesNotDominateBlock;
  7465. return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
  7466. ProperlyDominatesBlock : DominatesBlock;
  7467. }
  7468. case scUnknown:
  7469. if (Instruction *I =
  7470. dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
  7471. if (I->getParent() == BB)
  7472. return DominatesBlock;
  7473. if (DT->properlyDominates(I->getParent(), BB))
  7474. return ProperlyDominatesBlock;
  7475. return DoesNotDominateBlock;
  7476. }
  7477. return ProperlyDominatesBlock;
  7478. case scCouldNotCompute:
  7479. llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  7480. }
  7481. llvm_unreachable("Unknown SCEV kind!");
  7482. }
  7483. bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  7484. return getBlockDisposition(S, BB) >= DominatesBlock;
  7485. }
  7486. bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  7487. return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
  7488. }
  7489. namespace {
  7490. // Search for a SCEV expression node within an expression tree.
  7491. // Implements SCEVTraversal::Visitor.
  7492. struct SCEVSearch {
  7493. const SCEV *Node;
  7494. bool IsFound;
  7495. SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
  7496. bool follow(const SCEV *S) {
  7497. IsFound |= (S == Node);
  7498. return !IsFound;
  7499. }
  7500. bool isDone() const { return IsFound; }
  7501. };
  7502. }
  7503. bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  7504. SCEVSearch Search(Op);
  7505. visitAll(S, Search);
  7506. return Search.IsFound;
  7507. }
  7508. void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  7509. ValuesAtScopes.erase(S);
  7510. LoopDispositions.erase(S);
  7511. BlockDispositions.erase(S);
  7512. UnsignedRanges.erase(S);
  7513. SignedRanges.erase(S);
  7514. for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
  7515. BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
  7516. BackedgeTakenInfo &BEInfo = I->second;
  7517. if (BEInfo.hasOperand(S, this)) {
  7518. BEInfo.clear();
  7519. BackedgeTakenCounts.erase(I++);
  7520. }
  7521. else
  7522. ++I;
  7523. }
  7524. }
  7525. typedef DenseMap<const Loop *, std::string> VerifyMap;
  7526. /// replaceSubString - Replaces all occurrences of From in Str with To.
  7527. static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
  7528. size_t Pos = 0;
  7529. while ((Pos = Str.find(From, Pos)) != std::string::npos) {
  7530. Str.replace(Pos, From.size(), To.data(), To.size());
  7531. Pos += To.size();
  7532. }
  7533. }
  7534. /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
  7535. static void
  7536. getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
  7537. for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
  7538. getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
  7539. std::string &S = Map[L];
  7540. if (S.empty()) {
  7541. raw_string_ostream OS(S);
  7542. SE.getBackedgeTakenCount(L)->print(OS);
  7543. // false and 0 are semantically equivalent. This can happen in dead loops.
  7544. replaceSubString(OS.str(), "false", "0");
  7545. // Remove wrap flags, their use in SCEV is highly fragile.
  7546. // FIXME: Remove this when SCEV gets smarter about them.
  7547. replaceSubString(OS.str(), "<nw>", "");
  7548. replaceSubString(OS.str(), "<nsw>", "");
  7549. replaceSubString(OS.str(), "<nuw>", "");
  7550. }
  7551. }
  7552. }
  7553. void ScalarEvolution::verifyAnalysis() const {
  7554. if (!VerifySCEV)
  7555. return;
  7556. ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  7557. // Gather stringified backedge taken counts for all loops using SCEV's caches.
  7558. // FIXME: It would be much better to store actual values instead of strings,
  7559. // but SCEV pointers will change if we drop the caches.
  7560. VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
  7561. for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
  7562. getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
  7563. // Gather stringified backedge taken counts for all loops without using
  7564. // SCEV's caches.
  7565. SE.releaseMemory();
  7566. for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
  7567. getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
  7568. // Now compare whether they're the same with and without caches. This allows
  7569. // verifying that no pass changed the cache.
  7570. assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
  7571. "New loops suddenly appeared!");
  7572. for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
  7573. OldE = BackedgeDumpsOld.end(),
  7574. NewI = BackedgeDumpsNew.begin();
  7575. OldI != OldE; ++OldI, ++NewI) {
  7576. assert(OldI->first == NewI->first && "Loop order changed!");
7577. // Compare the stringified SCEVs. We don't care if an undef backedge-taken
7578. // count changes.
7579. // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. Such a
7580. // change means that a pass is buggy or that SCEV has to learn a new pattern,
7581. // but it is usually not harmful.
  7582. if (OldI->second != NewI->second &&
  7583. OldI->second.find("undef") == std::string::npos &&
  7584. NewI->second.find("undef") == std::string::npos &&
  7585. OldI->second != "***COULDNOTCOMPUTE***" &&
  7586. NewI->second != "***COULDNOTCOMPUTE***") {
  7587. dbgs() << "SCEVValidator: SCEV for loop '"
  7588. << OldI->first->getHeader()->getName()
  7589. << "' changed from '" << OldI->second
  7590. << "' to '" << NewI->second << "'!\n";
  7591. std::abort();
  7592. }
  7593. }
  7594. // TODO: Verify more things.
  7595. }