//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
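// For example, the canonical induction variable of a loop that starts at zero
// and steps by one each iteration is represented as the add recurrence
// {0,+,1}<%loop>, which is exactly the form the printing code below emits for
// SCEVAddRecExpr nodes.
//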
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
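// A minimal usage sketch, assuming a client pass that has a Loop *L, the
// loop's induction PHINode *PN, and a ScalarEvolution &SE to query (these
// variable names are placeholders, not identifiers defined in this file):
//
//   const SCEV *IV  = SE.getSCEV(PN);              // e.g. {0,+,1}<%loop>
//   const SCEV *BEC = SE.getBackedgeTakenCount(L); // SCEVCouldNotCompute when
//                                                  // the count is unknown
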
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

#if 0 // HLSL Change Starts - option pending
static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
#else
static const unsigned MaxBruteForceIterations = 100;
static const bool VerifySCEV = false;
#endif // HLSL Change Ends

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
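  // Add recurrences print as {Start,+,Step[,+,...]} followed by any no-wrap
  // flags and the loop's header block, e.g. {%base,+,4}<nuw><%for.body>.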
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }
    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }
    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
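// The three predicates below recognize the null-pointer GEP idioms that are
// conventionally used to express type sizes, alignments, and field offsets as
// constant expressions, e.g. sizeof(T) written as ptrtoint(gep(T* null, 1))
// and offsetof(S, F) written as ptrtoint(gep(S* null, 0, F)).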
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
  const LoopInfo *const LI;

public:
  explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

  // Return true if LHS is less complex than RHS, and false otherwise (i.e.,
  // when LHS is at least as complex as RHS).
  bool operator()(const SCEV *LHS, const SCEV *RHS) const {
    return compare(LHS, RHS) < 0;
  }

  // Return negative, zero, or positive, if LHS is less than, equal to, or
  // greater than RHS, respectively. A three-way result allows recursive
  // comparisons to be more efficient.
  int compare(const SCEV *LHS, const SCEV *RHS) const {
    // Fast-path: SCEVs are uniqued so we can do a quick equality check.
    if (LHS == RHS)
      return 0;

    // Primarily, sort the SCEVs by their getSCEVType().
    unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
    if (LType != RType)
      return (int)LType - (int)RType;

    // Aside from the getSCEVType() ordering, the particular ordering
    // isn't very important except that it's beneficial to be consistent,
    // so that (a + b) and (b + a) don't end up as different expressions.
    switch (static_cast<SCEVTypes>(LType)) {
    case scUnknown: {
      const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
      const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      const Value *LV = LU->getValue(), *RV = RU->getValue();

      // Order pointer values after integer values. This helps SCEVExpander
      // form GEPs.
      bool LIsPointer = LV->getType()->isPointerTy(),
           RIsPointer = RV->getType()->isPointerTy();
      if (LIsPointer != RIsPointer)
        return (int)LIsPointer - (int)RIsPointer;

      // Compare getValueID values.
      unsigned LID = LV->getValueID(),
               RID = RV->getValueID();
      if (LID != RID)
        return (int)LID - (int)RID;

      // Sort arguments by their position.
      if (const Argument *LA = dyn_cast<Argument>(LV)) {
        const Argument *RA = cast<Argument>(RV);
        unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
        return (int)LArgNo - (int)RArgNo;
      }

      // For instructions, compare their loop depth, and their operand
      // count. This is pretty loose.
      if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
        const Instruction *RInst = cast<Instruction>(RV);

        // Compare loop depths.
        const BasicBlock *LParent = LInst->getParent(),
                         *RParent = RInst->getParent();
        if (LParent != RParent) {
          unsigned LDepth = LI->getLoopDepth(LParent),
                   RDepth = LI->getLoopDepth(RParent);
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Compare the number of operands.
        unsigned LNumOps = LInst->getNumOperands(),
                 RNumOps = RInst->getNumOperands();
        return (int)LNumOps - (int)RNumOps;
      }

      return 0;
    }

    case scConstant: {
      const SCEVConstant *LC = cast<SCEVConstant>(LHS);
      const SCEVConstant *RC = cast<SCEVConstant>(RHS);

      // Compare constant values.
      const APInt &LA = LC->getValue()->getValue();
      const APInt &RA = RC->getValue()->getValue();
      unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
      if (LBitWidth != RBitWidth)
        return (int)LBitWidth - (int)RBitWidth;
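      // Identical constants are uniqued and were caught by the pointer
      // equality fast path above, so the values differ here; order them by
      // unsigned comparison.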
  499. return LA.ult(RA) ? -1 : 1;
  500. }
  501. case scAddRecExpr: {
  502. const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
  503. const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
  504. // Compare addrec loop depths.
  505. const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
  506. if (LLoop != RLoop) {
  507. unsigned LDepth = LLoop->getLoopDepth(),
  508. RDepth = RLoop->getLoopDepth();
  509. if (LDepth != RDepth)
  510. return (int)LDepth - (int)RDepth;
  511. }
  512. // Addrec complexity grows with operand count.
  513. unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
  514. if (LNumOps != RNumOps)
  515. return (int)LNumOps - (int)RNumOps;
  516. // Lexicographically compare.
  517. for (unsigned i = 0; i != LNumOps; ++i) {
  518. long X = compare(LA->getOperand(i), RA->getOperand(i));
  519. if (X != 0)
  520. return X;
  521. }
  522. return 0;
  523. }
  524. case scAddExpr:
  525. case scMulExpr:
  526. case scSMaxExpr:
  527. case scUMaxExpr: {
  528. const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
  529. const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
  530. // Lexicographically compare n-ary expressions.
  531. unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
  532. if (LNumOps != RNumOps)
  533. return (int)LNumOps - (int)RNumOps;
  534. for (unsigned i = 0; i != LNumOps; ++i) {
  535. if (i >= RNumOps)
  536. return 1;
  537. long X = compare(LC->getOperand(i), RC->getOperand(i));
  538. if (X != 0)
  539. return X;
  540. }
  541. return (int)LNumOps - (int)RNumOps;
  542. }
  543. case scUDivExpr: {
  544. const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
  545. const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
  546. // Lexicographically compare udiv expressions.
  547. long X = compare(LC->getLHS(), RC->getLHS());
  548. if (X != 0)
  549. return X;
  550. return compare(LC->getRHS(), RC->getRHS());
  551. }
  552. case scTruncate:
  553. case scZeroExtend:
  554. case scSignExtend: {
  555. const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
  556. const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
  557. // Compare cast expressions by operand.
  558. return compare(LC->getOperand(), RC->getOperand());
  559. }
  560. case scCouldNotCompute:
  561. llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  562. }
  563. llvm_unreachable("Unknown SCEV kind!");
  564. }
  565. };
  566. }

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
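/// Illustrative example (editorial addition, %a a hypothetical SCEVUnknown):
/// for the operand list {%a, 5, %a}, the constant sorts first because
/// scConstant has the lowest SCEV type, and the two identical unknowns are
/// grouped adjacently, giving {5, %a, %a}.
///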
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop

  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) {  // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;  // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

namespace {
struct FindSCEVSize {
  int Size;
  FindSCEVSize() : Size(0) {}

  bool follow(const SCEV *S) {
    ++Size;
    // Keep looking at all operands of S.
    return true;
  }
  bool isDone() const {
    return false;
  }
};
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
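  //
  // A few illustrative cases (editorial note, %a and %b hypothetical values):
  // dividing a SCEV by itself yields Quotient = 1 and Remainder = 0; dividing
  // the product (%a * %b) by %a yields Quotient = %b and Remainder = 0; and
  // when no rule below applies, the result defaults to Quotient = 0 and
  // Remainder = Numerator.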
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // The simple case N/1: the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getValue()->getValue();
      APInt DenominatorVal = D->getValue()->getValue();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    assert(Numerator->isAffine() && "Numerator should be affine");
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType()) {
      Quotient = Zero;
      Remainder = Numerator;
      return;
    }
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType()) {
        Quotient = Zero;
        Remainder = Numerator;
        return;
      }

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType()) {
        Quotient = Zero;
        Remainder = Numerator;
        return;
      }

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType()) {
        Quotient = Zero;
        Remainder = Numerator;
        return;
      }

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator)) {
      Quotient = Zero;
      Remainder = Numerator;
      return;
    }

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
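    // Editorial note: conceptually, the Numerator is treated as a polynomial
    // in the unknown Denominator; substituting 0 isolates the terms free of
    // the Denominator (the Remainder), and when nothing survives,
    // substituting 1 recovers the Quotient.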
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) {
      // This SCEV does not seem to simplify: fail the division here.
      Quotient = Zero;
      Remainder = Numerator;
      return;
    }
    divide(SE, Diff, Denominator, &Q, &R);
    assert(R == Zero &&
           "Denominator should evenly divide (Numerator - Remainder)");
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getConstant(Denominator->getType(), 0);
    One = SE.getConstant(Denominator->getType(), 1);

    // By default, we don't know how to divide Expr by Denominator.
    // Providing the default here simplifies the rest of the code.
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
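  //
  // Worked example (editorial illustration): for K = 3 and W = 32, K! = 6 =
  // 2^1 * 3, so T = 1 and the odd part of K! is 3.  The product
  // It*(It-1)*(It-2) is formed at 33 bits, divided by 2^T = 2, truncated to
  // 32 bits, and finally multiplied by the 32-bit multiplicative inverse of 3
  // (0xAAAAAAAB), which performs the exact division by 3 modulo 2^32.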

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
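/// For example (editorial illustration), {0,+,1,+,2} evaluates at iteration
/// It to 0*BC(It,0) + 1*BC(It,1) + 2*BC(It,2) = It + It*(It-1) = It^2, the
/// closed form of the recurrence that squares the induction variable.
///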
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node.  We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
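//
// For instance (editorial illustration): with an 8-bit recurrence and a step
// known positive with signed maximum 1, the limit is SIGNED_MIN - 1 == 127
// (mod 2^8) with predicate SLT; as long as the value stays strictly below 127
// before the increment, adding the step cannot signed-overflow.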
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRange(Step).getUnsignedMax());
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it.  Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling.  This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}.
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
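//
// For example (editorial illustration, %x a hypothetical value): for
// AR = {%x + 4,+,4} the pre-start is %x; when one of the three checks below
// succeeds, the extended start can be built as ext(4) + ext(%x) instead of
// ext(%x + 4).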
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once"
  // implies "S+X does not sign/unsign-overflow".
  //
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
                     (SE->*GetExtendExpr)(Step, WideTy));
  if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
    return PreStart;
  }
  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
                        (SE->*GetExtendExpr)(PreStart, Ty));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply
// "({S-T,+,X}+T) does not overflow" restricted to the 0th iteration.
// Therefore we only need to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and
// T is `Delta` (defined below).
//
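// Editorial note: in the code below, condition (2) is established from the
// no-wrap flags already cached on the nearby recurrence {S-T,+,X}, and
// condition (1) by proving that this recurrence stays within the overflow
// limit for an increment of T; see the "proves (1)" and "proves (2)" markers.
//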
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  //
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getValue()->getValue();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    const SCEVAddRecExpr *PreAR = [&]() {
      FoldingSetNodeID ID;
      ID.AddInteger(scAddRecExpr);
      ID.AddPointer(PreStart);
      ID.AddPointer(Step);
      ID.AddPointer(L);
      void *IP = nullptr;
      return static_cast<SCEVAddRecExpr *>(
          this->UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    }();

    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
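    // For example (editorial illustration): if %x is an i32 known to lie in
    // [0, 256), then zext to i16 of (trunc %x to i8) drops only zero bits and
    // can be folded to trunc %x to i16.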
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNUW))
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
            getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
            getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is provably positive, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty);

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if 0 < C1 < C2 and C2 is a
  // power of two.
  if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->getNumOperands() == 2) {
      auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
      auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
      if (SMul && SC1) {
        if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
          const APInt &C1 = SC1->getValue()->getValue();
          const APInt &C2 = SC2->getValue()->getValue();
          if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
              C2.ugt(C1) && C2.isPowerOf2())
            return getAddExpr(getSignExtendExpr(SC1, Ty),
                              getSignExtendExpr(SMul, Ty));
        }
      }
    }
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNSW))
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
            getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy)));
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
                getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy)));
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
                getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
              isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
                                          OverflowLimit)))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
              getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
        }
      }

      // If Start and Step are constants, check if we can apply this
      // transformation:
      // sext{C1,+,C2} --> C1 + sext{0,+,C2} if 0 < C1 < C2 and C2 is a
      // power of two.
      auto SC1 = dyn_cast<SCEVConstant>(Start);
      auto SC2 = dyn_cast<SCEVConstant>(Step);
      if (SC1 && SC2) {
        const APInt &C1 = SC1->getValue()->getValue();
        const APInt &C2 = SC2->getValue()->getValue();
        if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
            C2.isPowerOf2()) {
          Start = getSignExtendExpr(Start, Ty);
          const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
                                            L, AR->getNoWrapFlags());
          return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
            getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getValue()->getValue();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
struct APIntCompare {
  bool operator()(const APInt &LHS, const APInt &RHS) const {
    return LHS.ult(RHS);
  }
};
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `OldFlags' as can't-wrap behavior.  Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
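//
// For instance (editorial note), an <nsw> add whose operands are all known
// non-negative cannot wrap in the unsigned sense either, so <nuw> may be
// inferred as well; that is the only strengthening performed below.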
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const SmallVectorImpl<const SCEV *> &Ops,
                      SCEV::NoWrapFlags OldFlags) {
  using namespace std::placeholders;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative =
      std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1);

  if (SignOrUnsignWrap == SCEV::FlagNSW &&
      std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative))
    return ScalarEvolution::setFlags(OldFlags,
                                     (SCEV::NoWrapFlags)SignOrUnsignMask);

  return OldFlags;
}
  1752. /// getAddExpr - Get a canonical add expression, or something simpler if
  1753. /// possible.
  1754. const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
  1755. SCEV::NoWrapFlags Flags) {
  1756. assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
  1757. "only nuw or nsw allowed");
  1758. assert(!Ops.empty() && "Cannot get empty add!");
  1759. if (Ops.size() == 1) return Ops[0];
  1760. #ifndef NDEBUG
  1761. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  1762. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  1763. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  1764. "SCEVAddExpr operand types don't match!");
  1765. #endif
  1766. Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
  1767. // Sort by complexity, this groups all similar expression types together.
  1768. GroupByComplexity(Ops, LI);
  1769. // If there are any constants, fold them together.
  1770. unsigned Idx = 0;
  1771. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  1772. ++Idx;
  1773. assert(Idx < Ops.size());
  1774. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  1775. // We found two constants, fold them together!
  1776. Ops[0] = getConstant(LHSC->getValue()->getValue() +
  1777. RHSC->getValue()->getValue());
  1778. if (Ops.size() == 2) return Ops[0];
  1779. Ops.erase(Ops.begin()+1); // Erase the folded element
  1780. LHSC = cast<SCEVConstant>(Ops[0]);
  1781. }
  1782. // If we are left with a constant zero being added, strip it off.
  1783. if (LHSC->getValue()->isZero()) {
  1784. Ops.erase(Ops.begin());
  1785. --Idx;
  1786. }
  1787. if (Ops.size() == 1) return Ops[0];
  1788. }
  1789. // Okay, check to see if the same value occurs in the operand list more than
1790. once. If so, merge them together into a multiply expression. Since we
  1791. // sorted the list, these values are required to be adjacent.
  1792. Type *Ty = Ops[0]->getType();
  1793. bool FoundMatch = false;
  1794. for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
  1795. if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
  1796. // Scan ahead to count how many equal operands there are.
  1797. unsigned Count = 2;
  1798. while (i+Count != e && Ops[i+Count] == Ops[i])
  1799. ++Count;
  1800. // Merge the values into a multiply.
  1801. const SCEV *Scale = getConstant(Ty, Count);
  1802. const SCEV *Mul = getMulExpr(Scale, Ops[i]);
  1803. if (Ops.size() == Count)
  1804. return Mul;
  1805. Ops[i] = Mul;
  1806. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
  1807. --i; e -= Count - 1;
  1808. FoundMatch = true;
  1809. }
  1810. if (FoundMatch)
  1811. return getAddExpr(Ops, Flags);
  1812. // Check for truncates. If all the operands are truncated from the same
  1813. // type, see if factoring out the truncate would permit the result to be
1814. // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  1815. // if the contents of the resulting outer trunc fold to something simple.
  1816. for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
  1817. const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
  1818. Type *DstType = Trunc->getType();
  1819. Type *SrcType = Trunc->getOperand()->getType();
  1820. SmallVector<const SCEV *, 8> LargeOps;
  1821. bool Ok = true;
  1822. // Check all the operands to see if they can be represented in the
  1823. // source type of the truncate.
  1824. for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
  1825. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
  1826. if (T->getOperand()->getType() != SrcType) {
  1827. Ok = false;
  1828. break;
  1829. }
  1830. LargeOps.push_back(T->getOperand());
  1831. } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
  1832. LargeOps.push_back(getAnyExtendExpr(C, SrcType));
  1833. } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
  1834. SmallVector<const SCEV *, 8> LargeMulOps;
  1835. for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
  1836. if (const SCEVTruncateExpr *T =
  1837. dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
  1838. if (T->getOperand()->getType() != SrcType) {
  1839. Ok = false;
  1840. break;
  1841. }
  1842. LargeMulOps.push_back(T->getOperand());
  1843. } else if (const SCEVConstant *C =
  1844. dyn_cast<SCEVConstant>(M->getOperand(j))) {
  1845. LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
  1846. } else {
  1847. Ok = false;
  1848. break;
  1849. }
  1850. }
  1851. if (Ok)
  1852. LargeOps.push_back(getMulExpr(LargeMulOps));
  1853. } else {
  1854. Ok = false;
  1855. break;
  1856. }
  1857. }
  1858. if (Ok) {
  1859. // Evaluate the expression in the larger type.
  1860. const SCEV *Fold = getAddExpr(LargeOps, Flags);
  1861. // If it folds to something simple, use it. Otherwise, don't.
  1862. if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
  1863. return getTruncateExpr(Fold, DstType);
  1864. }
  1865. }
  1866. // Skip past any other cast SCEVs.
  1867. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
  1868. ++Idx;
  1869. // If there are add operands they would be next.
  1870. if (Idx < Ops.size()) {
  1871. bool DeletedAdd = false;
  1872. while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
  1873. // If we have an add, expand the add operands onto the end of the operands
  1874. // list.
  1875. Ops.erase(Ops.begin()+Idx);
  1876. Ops.append(Add->op_begin(), Add->op_end());
  1877. DeletedAdd = true;
  1878. }
  1879. // If we deleted at least one add, we added operands to the end of the list,
  1880. // and they are not necessarily sorted. Recurse to resort and resimplify
  1881. // any operands we just acquired.
  1882. if (DeletedAdd)
  1883. return getAddExpr(Ops);
  1884. }
  1885. // Skip over the add expression until we get to a multiply.
  1886. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
  1887. ++Idx;
  1888. // Check to see if there are any folding opportunities present with
  1889. // operands multiplied by constant values.
  1890. if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
  1891. uint64_t BitWidth = getTypeSizeInBits(Ty);
  1892. DenseMap<const SCEV *, APInt> M;
  1893. SmallVector<const SCEV *, 8> NewOps;
  1894. APInt AccumulatedConstant(BitWidth, 0);
  1895. if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
  1896. Ops.data(), Ops.size(),
  1897. APInt(BitWidth, 1), *this)) {
1898. // Some interesting folding opportunity is present, so it's worthwhile to
  1899. // re-generate the operands list. Group the operands by constant scale,
  1900. // to avoid multiplying by the same constant scale multiple times.
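// For example, a + 3*a + 2*b + 2*c collects 'a' with an accumulated scale of
// 4 and groups 'b' and 'c' under the shared scale 2, yielding 4*a + 2*(b + c).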
  1901. std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
  1902. for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
  1903. E = NewOps.end(); I != E; ++I)
  1904. MulOpLists[M.find(*I)->second].push_back(*I);
  1905. // Re-generate the operands list.
  1906. Ops.clear();
  1907. if (AccumulatedConstant != 0)
  1908. Ops.push_back(getConstant(AccumulatedConstant));
  1909. for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
  1910. I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
  1911. if (I->first != 0)
  1912. Ops.push_back(getMulExpr(getConstant(I->first),
  1913. getAddExpr(I->second)));
  1914. if (Ops.empty())
  1915. return getConstant(Ty, 0);
  1916. if (Ops.size() == 1)
  1917. return Ops[0];
  1918. return getAddExpr(Ops);
  1919. }
  1920. }
  1921. // If we are adding something to a multiply expression, make sure the
  1922. // something is not already an operand of the multiply. If so, merge it into
  1923. // the multiply.
  1924. for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
  1925. const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
  1926. for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
  1927. const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
  1928. if (isa<SCEVConstant>(MulOpSCEV))
  1929. continue;
  1930. for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
  1931. if (MulOpSCEV == Ops[AddOp]) {
  1932. // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
  1933. const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
  1934. if (Mul->getNumOperands() != 2) {
  1935. // If the multiply has more than two operands, we must get the
  1936. // Y*Z term.
  1937. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
  1938. Mul->op_begin()+MulOp);
  1939. MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
  1940. InnerMul = getMulExpr(MulOps);
  1941. }
  1942. const SCEV *One = getConstant(Ty, 1);
  1943. const SCEV *AddOne = getAddExpr(One, InnerMul);
  1944. const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
  1945. if (Ops.size() == 2) return OuterMul;
  1946. if (AddOp < Idx) {
  1947. Ops.erase(Ops.begin()+AddOp);
  1948. Ops.erase(Ops.begin()+Idx-1);
  1949. } else {
  1950. Ops.erase(Ops.begin()+Idx);
  1951. Ops.erase(Ops.begin()+AddOp-1);
  1952. }
  1953. Ops.push_back(OuterMul);
  1954. return getAddExpr(Ops);
  1955. }
  1956. // Check this multiply against other multiplies being added together.
  1957. for (unsigned OtherMulIdx = Idx+1;
  1958. OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
  1959. ++OtherMulIdx) {
  1960. const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
  1961. // If MulOp occurs in OtherMul, we can fold the two multiplies
  1962. // together.
  1963. for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
  1964. OMulOp != e; ++OMulOp)
  1965. if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
  1966. // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
  1967. const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
  1968. if (Mul->getNumOperands() != 2) {
  1969. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
  1970. Mul->op_begin()+MulOp);
  1971. MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
  1972. InnerMul1 = getMulExpr(MulOps);
  1973. }
  1974. const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
  1975. if (OtherMul->getNumOperands() != 2) {
  1976. SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
  1977. OtherMul->op_begin()+OMulOp);
  1978. MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
  1979. InnerMul2 = getMulExpr(MulOps);
  1980. }
  1981. const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
  1982. const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
  1983. if (Ops.size() == 2) return OuterMul;
  1984. Ops.erase(Ops.begin()+Idx);
  1985. Ops.erase(Ops.begin()+OtherMulIdx-1);
  1986. Ops.push_back(OuterMul);
  1987. return getAddExpr(Ops);
  1988. }
  1989. }
  1990. }
  1991. }
  1992. // If there are any add recurrences in the operands list, see if any other
  1993. // added values are loop invariant. If so, we can fold them into the
  1994. // recurrence.
  1995. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
  1996. ++Idx;
  1997. // Scan over all recurrences, trying to fold loop invariants into them.
  1998. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
  1999. // Scan all of the other operands to this add and add them to the vector if
  2000. // they are loop invariant w.r.t. the recurrence.
  2001. SmallVector<const SCEV *, 8> LIOps;
  2002. const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
  2003. const Loop *AddRecLoop = AddRec->getLoop();
  2004. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2005. if (isLoopInvariant(Ops[i], AddRecLoop)) {
  2006. LIOps.push_back(Ops[i]);
  2007. Ops.erase(Ops.begin()+i);
  2008. --i; --e;
  2009. }
  2010. // If we found some loop invariants, fold them into the recurrence.
  2011. if (!LIOps.empty()) {
  2012. // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
  2013. LIOps.push_back(AddRec->getStart());
  2014. SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
  2015. AddRec->op_end());
  2016. AddRecOps[0] = getAddExpr(LIOps);
  2017. // Build the new addrec. Propagate the NUW and NSW flags if both the
  2018. // outer add and the inner addrec are guaranteed to have no overflow.
  2019. // Always propagate NW.
  2020. Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
  2021. const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
  2022. // If all of the other operands were loop invariant, we are done.
  2023. if (Ops.size() == 1) return NewRec;
  2024. // Otherwise, add the folded AddRec by the non-invariant parts.
  2025. for (unsigned i = 0;; ++i)
  2026. if (Ops[i] == AddRec) {
  2027. Ops[i] = NewRec;
  2028. break;
  2029. }
  2030. return getAddExpr(Ops);
  2031. }
  2032. // Okay, if there weren't any loop invariants to be folded, check to see if
  2033. // there are multiple AddRec's with the same loop induction variable being
  2034. // added together. If so, we can fold them.
  2035. for (unsigned OtherIdx = Idx+1;
  2036. OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  2037. ++OtherIdx)
  2038. if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
  2039. // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
  2040. SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
  2041. AddRec->op_end());
  2042. for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  2043. ++OtherIdx)
  2044. if (const SCEVAddRecExpr *OtherAddRec =
  2045. dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
  2046. if (OtherAddRec->getLoop() == AddRecLoop) {
  2047. for (unsigned i = 0, e = OtherAddRec->getNumOperands();
  2048. i != e; ++i) {
  2049. if (i >= AddRecOps.size()) {
  2050. AddRecOps.append(OtherAddRec->op_begin()+i,
  2051. OtherAddRec->op_end());
  2052. break;
  2053. }
  2054. AddRecOps[i] = getAddExpr(AddRecOps[i],
  2055. OtherAddRec->getOperand(i));
  2056. }
  2057. Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
  2058. }
  2059. // Step size has changed, so we cannot guarantee no self-wraparound.
  2060. Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
  2061. return getAddExpr(Ops);
  2062. }
  2063. // Otherwise couldn't fold anything into this recurrence. Move onto the
  2064. // next one.
  2065. }
  2066. // Okay, it looks like we really DO need an add expr. Check to see if we
  2067. // already have one, otherwise create a new one.
  2068. FoldingSetNodeID ID;
  2069. ID.AddInteger(scAddExpr);
  2070. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2071. ID.AddPointer(Ops[i]);
  2072. void *IP = nullptr;
  2073. SCEVAddExpr *S =
  2074. static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2075. if (!S) {
  2076. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2077. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2078. S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
  2079. O, Ops.size());
  2080. UniqueSCEVs.InsertNode(S, IP);
  2081. }
  2082. S->setNoWrapFlags(Flags);
  2083. return S;
  2084. }
  2085. static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  2086. uint64_t k = i*j;
  2087. if (j > 1 && k / j != i) Overflow = true;
  2088. return k;
  2089. }
  2090. /// Compute the result of "n choose k", the binomial coefficient. If an
  2091. /// intermediate computation overflows, Overflow will be set and the return will
2092. // be garbage. Overflow is not cleared in the absence of overflow.
  2093. static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  2094. // We use the multiplicative formula:
  2095. // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2096. // At each iteration, we take the i-th term of the numerator and divide by the
2097. // i-th term of the denominator. This division will always produce an
  2098. // integral result, and helps reduce the chance of overflow in the
  2099. // intermediate computations. However, we can still overflow even when the
  2100. // final result would fit.
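// For example, Choose(5, 2) computes r = (1*5)/1 = 5, then r = (5*4)/2 = 10,
// which is C(5,2), without ever forming the full factorials.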
  2101. if (n == 0 || n == k) return 1;
  2102. if (k > n) return 0;
  2103. if (k > n/2)
  2104. k = n-k;
  2105. uint64_t r = 1;
  2106. for (uint64_t i = 1; i <= k; ++i) {
  2107. r = umul_ov(r, n-(i-1), Overflow);
  2108. r /= i;
  2109. }
  2110. return r;
  2111. }
  2112. /// Determine if any of the operands in this SCEV are a constant or if
  2113. /// any of the add or multiply expressions in this SCEV contain a constant.
  2114. static bool containsConstantSomewhere(const SCEV *StartExpr) {
  2115. SmallVector<const SCEV *, 4> Ops;
  2116. Ops.push_back(StartExpr);
  2117. while (!Ops.empty()) {
  2118. const SCEV *CurrentExpr = Ops.pop_back_val();
  2119. if (isa<SCEVConstant>(*CurrentExpr))
  2120. return true;
  2121. if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
  2122. const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
  2123. Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
  2124. }
  2125. }
  2126. return false;
  2127. }
  2128. /// getMulExpr - Get a canonical multiply expression, or something simpler if
  2129. /// possible.
  2130. const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
  2131. SCEV::NoWrapFlags Flags) {
  2132. assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
  2133. "only nuw or nsw allowed");
  2134. assert(!Ops.empty() && "Cannot get empty mul!");
  2135. if (Ops.size() == 1) return Ops[0];
  2136. #ifndef NDEBUG
  2137. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2138. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2139. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2140. "SCEVMulExpr operand types don't match!");
  2141. #endif
  2142. Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2143. // Sort by complexity; this groups all similar expression types together.
  2144. GroupByComplexity(Ops, LI);
  2145. // If there are any constants, fold them together.
  2146. unsigned Idx = 0;
  2147. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2148. // C1*(C2+V) -> C1*C2 + C1*V
  2149. if (Ops.size() == 2)
  2150. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
  2151. // If any of Add's ops are Adds or Muls with a constant,
  2152. // apply this transformation as well.
  2153. if (Add->getNumOperands() == 2)
  2154. if (containsConstantSomewhere(Add))
  2155. return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
  2156. getMulExpr(LHSC, Add->getOperand(1)));
  2157. ++Idx;
  2158. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2159. // We found two constants, fold them together!
  2160. ConstantInt *Fold = ConstantInt::get(getContext(),
  2161. LHSC->getValue()->getValue() *
  2162. RHSC->getValue()->getValue());
  2163. Ops[0] = getConstant(Fold);
  2164. Ops.erase(Ops.begin()+1); // Erase the folded element
  2165. if (Ops.size() == 1) return Ops[0];
  2166. LHSC = cast<SCEVConstant>(Ops[0]);
  2167. }
  2168. // If we are left with a constant one being multiplied, strip it off.
  2169. if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
  2170. Ops.erase(Ops.begin());
  2171. --Idx;
  2172. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
  2173. // If we have a multiply of zero, it will always be zero.
  2174. return Ops[0];
  2175. } else if (Ops[0]->isAllOnesValue()) {
  2176. // If we have a mul by -1 of an add, try distributing the -1 among the
  2177. // add operands.
  2178. if (Ops.size() == 2) {
  2179. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
  2180. SmallVector<const SCEV *, 4> NewOps;
  2181. bool AnyFolded = false;
2182. for (SCEVAddExpr::op_iterator I = Add->op_begin(),
  2183. E = Add->op_end(); I != E; ++I) {
  2184. const SCEV *Mul = getMulExpr(Ops[0], *I);
  2185. if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
  2186. NewOps.push_back(Mul);
  2187. }
  2188. if (AnyFolded)
  2189. return getAddExpr(NewOps);
  2190. }
  2191. else if (const SCEVAddRecExpr *
  2192. AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
  2193. // Negation preserves a recurrence's no self-wrap property.
  2194. SmallVector<const SCEV *, 4> Operands;
  2195. for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
  2196. E = AddRec->op_end(); I != E; ++I) {
  2197. Operands.push_back(getMulExpr(Ops[0], *I));
  2198. }
  2199. return getAddRecExpr(Operands, AddRec->getLoop(),
  2200. AddRec->getNoWrapFlags(SCEV::FlagNW));
  2201. }
  2202. }
  2203. }
  2204. if (Ops.size() == 1)
  2205. return Ops[0];
  2206. }
  2207. // Skip over the add expression until we get to a multiply.
  2208. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
  2209. ++Idx;
  2210. // If there are mul operands inline them all into this expression.
  2211. if (Idx < Ops.size()) {
  2212. bool DeletedMul = false;
  2213. while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2214. // If we have a mul, expand the mul operands onto the end of the operands
  2215. // list.
  2216. Ops.erase(Ops.begin()+Idx);
  2217. Ops.append(Mul->op_begin(), Mul->op_end());
  2218. DeletedMul = true;
  2219. }
  2220. // If we deleted at least one mul, we added operands to the end of the list,
  2221. // and they are not necessarily sorted. Recurse to resort and resimplify
  2222. // any operands we just acquired.
  2223. if (DeletedMul)
  2224. return getMulExpr(Ops);
  2225. }
  2226. // If there are any add recurrences in the operands list, see if any other
  2227. // added values are loop invariant. If so, we can fold them into the
  2228. // recurrence.
  2229. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
  2230. ++Idx;
  2231. // Scan over all recurrences, trying to fold loop invariants into them.
  2232. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
  2233. // Scan all of the other operands to this mul and add them to the vector if
  2234. // they are loop invariant w.r.t. the recurrence.
  2235. SmallVector<const SCEV *, 8> LIOps;
  2236. const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
  2237. const Loop *AddRecLoop = AddRec->getLoop();
  2238. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2239. if (isLoopInvariant(Ops[i], AddRecLoop)) {
  2240. LIOps.push_back(Ops[i]);
  2241. Ops.erase(Ops.begin()+i);
  2242. --i; --e;
  2243. }
  2244. // If we found some loop invariants, fold them into the recurrence.
  2245. if (!LIOps.empty()) {
  2246. // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
  2247. SmallVector<const SCEV *, 4> NewOps;
  2248. NewOps.reserve(AddRec->getNumOperands());
  2249. const SCEV *Scale = getMulExpr(LIOps);
  2250. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
  2251. NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
  2252. // Build the new addrec. Propagate the NUW and NSW flags if both the
  2253. // outer mul and the inner addrec are guaranteed to have no overflow.
  2254. //
2255. // The no-self-wrap property cannot be guaranteed after changing the step size,
2256. // but it will be inferred if either NUW or NSW is true.
  2257. Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
  2258. const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
  2259. // If all of the other operands were loop invariant, we are done.
  2260. if (Ops.size() == 1) return NewRec;
  2261. // Otherwise, multiply the folded AddRec by the non-invariant parts.
  2262. for (unsigned i = 0;; ++i)
  2263. if (Ops[i] == AddRec) {
  2264. Ops[i] = NewRec;
  2265. break;
  2266. }
  2267. return getMulExpr(Ops);
  2268. }
  2269. // Okay, if there weren't any loop invariants to be folded, check to see if
  2270. // there are multiple AddRec's with the same loop induction variable being
  2271. // multiplied together. If so, we can fold them.
  2272. // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
  2273. // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2274. // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
  2275. // ]]],+,...up to x=2n}.
  2276. // Note that the arguments to choose() are always integers with values
  2277. // known at compile time, never SCEV objects.
  2278. //
  2279. // The implementation avoids pointless extra computations when the two
  2280. // addrec's are of different length (mathematically, it's equivalent to
  2281. // an infinite stream of zeros on the right).
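// For example, {0,+,1}<L> * {0,+,1}<L> (the canonical induction variable
// squared) folds to {0,+,1,+,2}<L>, whose successive values 0, 1, 4, 9, ...
// are exactly i*i.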
  2282. bool OpsModified = false;
  2283. for (unsigned OtherIdx = Idx+1;
  2284. OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  2285. ++OtherIdx) {
  2286. const SCEVAddRecExpr *OtherAddRec =
  2287. dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
  2288. if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
  2289. continue;
  2290. bool Overflow = false;
  2291. Type *Ty = AddRec->getType();
  2292. bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
  2293. SmallVector<const SCEV*, 7> AddRecOps;
  2294. for (int x = 0, xe = AddRec->getNumOperands() +
  2295. OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
  2296. const SCEV *Term = getConstant(Ty, 0);
  2297. for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
  2298. uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
  2299. for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
  2300. ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
  2301. z < ze && !Overflow; ++z) {
  2302. uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
  2303. uint64_t Coeff;
  2304. if (LargerThan64Bits)
  2305. Coeff = umul_ov(Coeff1, Coeff2, Overflow);
  2306. else
  2307. Coeff = Coeff1*Coeff2;
  2308. const SCEV *CoeffTerm = getConstant(Ty, Coeff);
  2309. const SCEV *Term1 = AddRec->getOperand(y-z);
  2310. const SCEV *Term2 = OtherAddRec->getOperand(z);
  2311. Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
  2312. }
  2313. }
  2314. AddRecOps.push_back(Term);
  2315. }
  2316. if (!Overflow) {
  2317. const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
  2318. SCEV::FlagAnyWrap);
  2319. if (Ops.size() == 2) return NewAddRec;
  2320. Ops[Idx] = NewAddRec;
  2321. Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
  2322. OpsModified = true;
  2323. AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
  2324. if (!AddRec)
  2325. break;
  2326. }
  2327. }
  2328. if (OpsModified)
  2329. return getMulExpr(Ops);
  2330. // Otherwise couldn't fold anything into this recurrence. Move onto the
  2331. // next one.
  2332. }
2333. // Okay, it looks like we really DO need a mul expr. Check to see if we
  2334. // already have one, otherwise create a new one.
  2335. FoldingSetNodeID ID;
  2336. ID.AddInteger(scMulExpr);
  2337. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2338. ID.AddPointer(Ops[i]);
  2339. void *IP = nullptr;
  2340. SCEVMulExpr *S =
  2341. static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2342. if (!S) {
  2343. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2344. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2345. S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
  2346. O, Ops.size());
  2347. UniqueSCEVs.InsertNode(S, IP);
  2348. }
  2349. S->setNoWrapFlags(Flags);
  2350. return S;
  2351. }
  2352. /// getUDivExpr - Get a canonical unsigned division expression, or something
  2353. /// simpler if possible.
  2354. const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
  2355. const SCEV *RHS) {
  2356. assert(getEffectiveSCEVType(LHS->getType()) ==
  2357. getEffectiveSCEVType(RHS->getType()) &&
  2358. "SCEVUDivExpr operand types don't match!");
  2359. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
  2360. if (RHSC->getValue()->equalsInt(1))
  2361. return LHS; // X udiv 1 --> x
  2362. // If the denominator is zero, the result of the udiv is undefined. Don't
  2363. // try to analyze it, because the resolution chosen here may differ from
  2364. // the resolution chosen in other parts of the compiler.
  2365. if (!RHSC->getValue()->isZero()) {
  2366. // Determine if the division can be folded into the operands of
2367. // the dividend.
  2368. // TODO: Generalize this to non-constants by using known-bits information.
  2369. Type *Ty = LHS->getType();
  2370. unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
  2371. unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
  2372. // For non-power-of-two values, effectively round the value up to the
  2373. // nearest power of two.
  2374. if (!RHSC->getValue()->getValue().isPowerOf2())
  2375. ++MaxShiftAmt;
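// For example, an i32 divide by 6 has LZ = 29, so MaxShiftAmt becomes
// 2 + 1 = 3 and ExtTy is i35, i.e. 6 is treated like the next power of two, 8.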
  2376. IntegerType *ExtTy =
  2377. IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
  2378. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
  2379. if (const SCEVConstant *Step =
  2380. dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
  2381. // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
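// For example, {8,+,4}<L> /u 2 folds to {4,+,2}<L> once the zero-extension
// check below shows the transformation is safe.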
  2382. const APInt &StepInt = Step->getValue()->getValue();
  2383. const APInt &DivInt = RHSC->getValue()->getValue();
  2384. if (!StepInt.urem(DivInt) &&
  2385. getZeroExtendExpr(AR, ExtTy) ==
  2386. getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
  2387. getZeroExtendExpr(Step, ExtTy),
  2388. AR->getLoop(), SCEV::FlagAnyWrap)) {
  2389. SmallVector<const SCEV *, 4> Operands;
  2390. for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
  2391. Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
  2392. return getAddRecExpr(Operands, AR->getLoop(),
  2393. SCEV::FlagNW);
  2394. }
2395. // Get a canonical UDivExpr for a recurrence.
2396. // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
  2397. // We can currently only fold X%N if X is constant.
  2398. const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
  2399. if (StartC && !DivInt.urem(StepInt) &&
  2400. getZeroExtendExpr(AR, ExtTy) ==
  2401. getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
  2402. getZeroExtendExpr(Step, ExtTy),
  2403. AR->getLoop(), SCEV::FlagAnyWrap)) {
  2404. const APInt &StartInt = StartC->getValue()->getValue();
  2405. const APInt &StartRem = StartInt.urem(StepInt);
  2406. if (StartRem != 0)
  2407. LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
  2408. AR->getLoop(), SCEV::FlagNW);
  2409. }
  2410. }
  2411. // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
  2412. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
  2413. SmallVector<const SCEV *, 4> Operands;
  2414. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
  2415. Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
  2416. if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
  2417. // Find an operand that's safely divisible.
  2418. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
  2419. const SCEV *Op = M->getOperand(i);
  2420. const SCEV *Div = getUDivExpr(Op, RHSC);
  2421. if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
  2422. Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
  2423. M->op_end());
  2424. Operands[i] = Div;
  2425. return getMulExpr(Operands);
  2426. }
  2427. }
  2428. }
  2429. // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
  2430. if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
  2431. SmallVector<const SCEV *, 4> Operands;
  2432. for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
  2433. Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
  2434. if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
  2435. Operands.clear();
  2436. for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
  2437. const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
  2438. if (isa<SCEVUDivExpr>(Op) ||
  2439. getMulExpr(Op, RHS) != A->getOperand(i))
  2440. break;
  2441. Operands.push_back(Op);
  2442. }
  2443. if (Operands.size() == A->getNumOperands())
  2444. return getAddExpr(Operands);
  2445. }
  2446. }
  2447. // Fold if both operands are constant.
  2448. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
  2449. Constant *LHSCV = LHSC->getValue();
  2450. Constant *RHSCV = RHSC->getValue();
  2451. return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
  2452. RHSCV)));
  2453. }
  2454. }
  2455. }
  2456. FoldingSetNodeID ID;
  2457. ID.AddInteger(scUDivExpr);
  2458. ID.AddPointer(LHS);
  2459. ID.AddPointer(RHS);
  2460. void *IP = nullptr;
  2461. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2462. SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
  2463. LHS, RHS);
  2464. UniqueSCEVs.InsertNode(S, IP);
  2465. return S;
  2466. }
  2467. static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  2468. APInt A = C1->getValue()->getValue().abs();
  2469. APInt B = C2->getValue()->getValue().abs();
  2470. uint32_t ABW = A.getBitWidth();
  2471. uint32_t BBW = B.getBitWidth();
  2472. if (ABW > BBW)
  2473. B = B.zext(ABW);
  2474. else if (ABW < BBW)
  2475. A = A.zext(BBW);
  2476. return APIntOps::GreatestCommonDivisor(A, B);
  2477. }
  2478. /// getUDivExactExpr - Get a canonical unsigned division expression, or
  2479. /// something simpler if possible. There is no representation for an exact udiv
  2480. /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS.
  2481. /// We can't do this when it's not exact because the udiv may be clearing bits.
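/// For example, (6 * %x) /u 3, when known exact, first divides both constants
/// by their gcd to give (2 * %x) /u 1, which then folds to 2 * %x.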
  2482. const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
  2483. const SCEV *RHS) {
  2484. // TODO: we could try to find factors in all sorts of things, but for now we
  2485. // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  2486. // end of this file for inspiration.
  2487. const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  2488. if (!Mul)
  2489. return getUDivExpr(LHS, RHS);
  2490. if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
  2491. // If the mulexpr multiplies by a constant, then that constant must be the
  2492. // first element of the mulexpr.
  2493. if (const SCEVConstant *LHSCst =
  2494. dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
  2495. if (LHSCst == RHSCst) {
  2496. SmallVector<const SCEV *, 2> Operands;
  2497. Operands.append(Mul->op_begin() + 1, Mul->op_end());
  2498. return getMulExpr(Operands);
  2499. }
2500. // We can't just assume that LHSCst divides RHSCst cleanly; it could be
  2501. // that there's a factor provided by one of the other terms. We need to
  2502. // check.
  2503. APInt Factor = gcd(LHSCst, RHSCst);
  2504. if (!Factor.isIntN(1)) {
  2505. LHSCst = cast<SCEVConstant>(
  2506. getConstant(LHSCst->getValue()->getValue().udiv(Factor)));
  2507. RHSCst = cast<SCEVConstant>(
  2508. getConstant(RHSCst->getValue()->getValue().udiv(Factor)));
  2509. SmallVector<const SCEV *, 2> Operands;
  2510. Operands.push_back(LHSCst);
  2511. Operands.append(Mul->op_begin() + 1, Mul->op_end());
  2512. LHS = getMulExpr(Operands);
  2513. RHS = RHSCst;
  2514. Mul = dyn_cast<SCEVMulExpr>(LHS);
  2515. if (!Mul)
  2516. return getUDivExactExpr(LHS, RHS);
  2517. }
  2518. }
  2519. }
  2520. for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
  2521. if (Mul->getOperand(i) == RHS) {
  2522. SmallVector<const SCEV *, 2> Operands;
  2523. Operands.append(Mul->op_begin(), Mul->op_begin() + i);
  2524. Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
  2525. return getMulExpr(Operands);
  2526. }
  2527. }
  2528. return getUDivExpr(LHS, RHS);
  2529. }
  2530. /// getAddRecExpr - Get an add recurrence expression for the specified loop.
  2531. /// Simplify the expression as much as possible.
  2532. const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
  2533. const Loop *L,
  2534. SCEV::NoWrapFlags Flags) {
  2535. SmallVector<const SCEV *, 4> Operands;
  2536. Operands.push_back(Start);
  2537. if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
  2538. if (StepChrec->getLoop() == L) {
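// Flatten a step that is itself an addrec over the same loop:
// getAddRecExpr(X, {Y,+,Z}<L>, L) becomes {X,+,Y,+,Z}<L>.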
  2539. Operands.append(StepChrec->op_begin(), StepChrec->op_end());
  2540. return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
  2541. }
  2542. Operands.push_back(Step);
  2543. return getAddRecExpr(Operands, L, Flags);
  2544. }
  2545. /// getAddRecExpr - Get an add recurrence expression for the specified loop.
  2546. /// Simplify the expression as much as possible.
  2547. const SCEV *
  2548. ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
  2549. const Loop *L, SCEV::NoWrapFlags Flags) {
  2550. if (Operands.size() == 1) return Operands[0];
  2551. #ifndef NDEBUG
  2552. Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  2553. for (unsigned i = 1, e = Operands.size(); i != e; ++i)
  2554. assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
  2555. "SCEVAddRecExpr operand types don't match!");
  2556. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2557. assert(isLoopInvariant(Operands[i], L) &&
  2558. "SCEVAddRecExpr operand is not loop-invariant!");
  2559. #endif
  2560. if (Operands.back()->isZero()) {
  2561. Operands.pop_back();
  2562. return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  2563. }
2564. // It's tempting to want to call getMaxBackedgeTakenCount here and
  2565. // use that information to infer NUW and NSW flags. However, computing a
  2566. // BE count requires calling getAddRecExpr, so we may not yet have a
  2567. // meaningful BE count at this point (and if we don't, we'd be stuck
  2568. // with a SCEVCouldNotCompute as the cached BE count).
  2569. Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
2570. // Canonicalize nested AddRecs by nesting them in order of loop depth.
  2571. if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
  2572. const Loop *NestedLoop = NestedAR->getLoop();
  2573. if (L->contains(NestedLoop) ?
  2574. (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
  2575. (!NestedLoop->contains(L) &&
  2576. DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
  2577. SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
  2578. NestedAR->op_end());
  2579. Operands[0] = NestedAR->getStart();
  2580. // AddRecs require their operands be loop-invariant with respect to their
  2581. // loops. Don't perform this transformation if it would break this
  2582. // requirement.
  2583. bool AllInvariant = true;
  2584. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2585. if (!isLoopInvariant(Operands[i], L)) {
  2586. AllInvariant = false;
  2587. break;
  2588. }
  2589. if (AllInvariant) {
  2590. // Create a recurrence for the outer loop with the same step size.
  2591. //
  2592. // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
  2593. // inner recurrence has the same property.
  2594. SCEV::NoWrapFlags OuterFlags =
  2595. maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
  2596. NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
  2597. AllInvariant = true;
  2598. for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
  2599. if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
  2600. AllInvariant = false;
  2601. break;
  2602. }
  2603. if (AllInvariant) {
  2604. // Ok, both add recurrences are valid after the transformation.
  2605. //
  2606. // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
  2607. // the outer recurrence has the same property.
  2608. SCEV::NoWrapFlags InnerFlags =
  2609. maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
  2610. return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
  2611. }
  2612. }
  2613. // Reset Operands to its original state.
  2614. Operands[0] = NestedAR;
  2615. }
  2616. }
  2617. // Okay, it looks like we really DO need an addrec expr. Check to see if we
  2618. // already have one, otherwise create a new one.
  2619. FoldingSetNodeID ID;
  2620. ID.AddInteger(scAddRecExpr);
  2621. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2622. ID.AddPointer(Operands[i]);
  2623. ID.AddPointer(L);
  2624. void *IP = nullptr;
  2625. SCEVAddRecExpr *S =
  2626. static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2627. if (!S) {
  2628. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
  2629. std::uninitialized_copy(Operands.begin(), Operands.end(), O);
  2630. S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
  2631. O, Operands.size(), L);
  2632. UniqueSCEVs.InsertNode(S, IP);
  2633. }
  2634. S->setNoWrapFlags(Flags);
  2635. return S;
  2636. }
  2637. const SCEV *
  2638. ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
  2639. const SmallVectorImpl<const SCEV *> &IndexExprs,
  2640. bool InBounds) {
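// For example (assuming an 8-byte double), 'getelementptr double, double* %p,
// i64 %i' yields the SCEV (%p + 8 * %i): each index is converted to the
// pointer-sized integer type and scaled by its element or field offset below.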
  2641. // getSCEV(Base)->getType() has the same address space as Base->getType()
  2642. // because SCEV::getType() preserves the address space.
  2643. Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  2644. // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  2645. // instruction to its SCEV, because the Instruction may be guarded by control
  2646. // flow and the no-overflow bits may not be valid for the expression in any
  2647. // context.
  2648. SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  2649. const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
2650. // The address space is unimportant; the first thing we do with CurTy is get
  2651. // its element type.
  2652. Type *CurTy = PointerType::getUnqual(PointeeType);
  2653. for (const SCEV *IndexExpr : IndexExprs) {
  2654. // Compute the (potentially symbolic) offset in bytes for this index.
  2655. if (StructType *STy = dyn_cast<StructType>(CurTy)) {
  2656. // For a struct, add the member offset.
  2657. ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
  2658. unsigned FieldNo = Index->getZExtValue();
  2659. const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
  2660. // Add the field offset to the running total offset.
  2661. TotalOffset = getAddExpr(TotalOffset, FieldOffset);
  2662. // Update CurTy to the type of the field at Index.
  2663. CurTy = STy->getTypeAtIndex(Index);
  2664. } else {
  2665. // Update CurTy to its element type.
  2666. CurTy = cast<SequentialType>(CurTy)->getElementType();
  2667. // For an array, add the element offset, explicitly scaled.
  2668. const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
  2669. // Getelementptr indices are signed.
  2670. IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
  2671. // Multiply the index by the element size to compute the element offset.
  2672. const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
  2673. // Add the element offset to the running total offset.
  2674. TotalOffset = getAddExpr(TotalOffset, LocalOffset);
  2675. }
  2676. }
  2677. // Add the total offset from all the GEP indices to the base.
  2678. return getAddExpr(BaseExpr, TotalOffset, Wrap);
  2679. }
  2680. const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
  2681. const SCEV *RHS) {
  2682. SmallVector<const SCEV *, 2> Ops;
  2683. Ops.push_back(LHS);
  2684. Ops.push_back(RHS);
  2685. return getSMaxExpr(Ops);
  2686. }
  2687. const SCEV *
  2688. ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  2689. assert(!Ops.empty() && "Cannot get empty smax!");
  2690. if (Ops.size() == 1) return Ops[0];
  2691. #ifndef NDEBUG
  2692. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2693. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2694. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2695. "SCEVSMaxExpr operand types don't match!");
  2696. #endif
2697. // Sort by complexity; this groups all similar expression types together.
  2698. GroupByComplexity(Ops, LI);
  2699. // If there are any constants, fold them together.
  2700. unsigned Idx = 0;
  2701. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2702. ++Idx;
  2703. assert(Idx < Ops.size());
  2704. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2705. // We found two constants, fold them together!
  2706. ConstantInt *Fold = ConstantInt::get(getContext(),
  2707. APIntOps::smax(LHSC->getValue()->getValue(),
  2708. RHSC->getValue()->getValue()));
  2709. Ops[0] = getConstant(Fold);
  2710. Ops.erase(Ops.begin()+1); // Erase the folded element
  2711. if (Ops.size() == 1) return Ops[0];
  2712. LHSC = cast<SCEVConstant>(Ops[0]);
  2713. }
  2714. // If we are left with a constant minimum-int, strip it off.
  2715. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
  2716. Ops.erase(Ops.begin());
  2717. --Idx;
  2718. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
  2719. // If we have an smax with a constant maximum-int, it will always be
  2720. // maximum-int.
  2721. return Ops[0];
  2722. }
  2723. if (Ops.size() == 1) return Ops[0];
  2724. }
  2725. // Find the first SMax
  2726. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
  2727. ++Idx;
  2728. // Check to see if one of the operands is an SMax. If so, expand its operands
  2729. // onto our operand list, and recurse to simplify.
  2730. if (Idx < Ops.size()) {
  2731. bool DeletedSMax = false;
  2732. while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
  2733. Ops.erase(Ops.begin()+Idx);
  2734. Ops.append(SMax->op_begin(), SMax->op_end());
  2735. DeletedSMax = true;
  2736. }
  2737. if (DeletedSMax)
  2738. return getSMaxExpr(Ops);
  2739. }
  2740. // Okay, check to see if the same value occurs in the operand list twice. If
  2741. // so, delete one. Since we sorted the list, these values are required to
  2742. // be adjacent.
  2743. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
  2744. // X smax Y smax Y --> X smax Y
2745. // X smax Y --> X, if X is always greater than or equal to Y
  2746. if (Ops[i] == Ops[i+1] ||
  2747. isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
  2748. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
  2749. --i; --e;
  2750. } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
  2751. Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
  2752. --i; --e;
  2753. }
  2754. if (Ops.size() == 1) return Ops[0];
  2755. assert(!Ops.empty() && "Reduced smax down to nothing!");
  2756. // Okay, it looks like we really DO need an smax expr. Check to see if we
  2757. // already have one, otherwise create a new one.
  2758. FoldingSetNodeID ID;
  2759. ID.AddInteger(scSMaxExpr);
  2760. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2761. ID.AddPointer(Ops[i]);
  2762. void *IP = nullptr;
  2763. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2764. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2765. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2766. SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
  2767. O, Ops.size());
  2768. UniqueSCEVs.InsertNode(S, IP);
  2769. return S;
  2770. }
  2771. const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
  2772. const SCEV *RHS) {
  2773. SmallVector<const SCEV *, 2> Ops;
  2774. Ops.push_back(LHS);
  2775. Ops.push_back(RHS);
  2776. return getUMaxExpr(Ops);
  2777. }
  2778. const SCEV *
  2779. ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  2780. assert(!Ops.empty() && "Cannot get empty umax!");
  2781. if (Ops.size() == 1) return Ops[0];
  2782. #ifndef NDEBUG
  2783. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2784. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2785. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2786. "SCEVUMaxExpr operand types don't match!");
  2787. #endif
2788. // Sort by complexity; this groups all similar expression types together.
  2789. GroupByComplexity(Ops, LI);
  2790. // If there are any constants, fold them together.
  2791. unsigned Idx = 0;
  2792. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2793. ++Idx;
  2794. assert(Idx < Ops.size());
  2795. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2796. // We found two constants, fold them together!
  2797. ConstantInt *Fold = ConstantInt::get(getContext(),
  2798. APIntOps::umax(LHSC->getValue()->getValue(),
  2799. RHSC->getValue()->getValue()));
  2800. Ops[0] = getConstant(Fold);
  2801. Ops.erase(Ops.begin()+1); // Erase the folded element
  2802. if (Ops.size() == 1) return Ops[0];
  2803. LHSC = cast<SCEVConstant>(Ops[0]);
  2804. }
  2805. // If we are left with a constant minimum-int, strip it off.
  2806. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
  2807. Ops.erase(Ops.begin());
  2808. --Idx;
  2809. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
  2810. // If we have an umax with a constant maximum-int, it will always be
  2811. // maximum-int.
  2812. return Ops[0];
  2813. }
  2814. if (Ops.size() == 1) return Ops[0];
  2815. }
  2816. // Find the first UMax
  2817. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
  2818. ++Idx;
  2819. // Check to see if one of the operands is a UMax. If so, expand its operands
  2820. // onto our operand list, and recurse to simplify.
  2821. if (Idx < Ops.size()) {
  2822. bool DeletedUMax = false;
  2823. while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
  2824. Ops.erase(Ops.begin()+Idx);
  2825. Ops.append(UMax->op_begin(), UMax->op_end());
  2826. DeletedUMax = true;
  2827. }
  2828. if (DeletedUMax)
  2829. return getUMaxExpr(Ops);
  2830. }
  2831. // Okay, check to see if the same value occurs in the operand list twice. If
  2832. // so, delete one. Since we sorted the list, these values are required to
  2833. // be adjacent.
  2834. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
  2835. // X umax Y umax Y --> X umax Y
2836. // X umax Y --> X, if X is always greater than or equal to Y
  2837. if (Ops[i] == Ops[i+1] ||
  2838. isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
  2839. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
  2840. --i; --e;
  2841. } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
  2842. Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
  2843. --i; --e;
  2844. }
  2845. if (Ops.size() == 1) return Ops[0];
  2846. assert(!Ops.empty() && "Reduced umax down to nothing!");
  2847. // Okay, it looks like we really DO need a umax expr. Check to see if we
  2848. // already have one, otherwise create a new one.
  2849. FoldingSetNodeID ID;
  2850. ID.AddInteger(scUMaxExpr);
  2851. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2852. ID.AddPointer(Ops[i]);
  2853. void *IP = nullptr;
  2854. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2855. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2856. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2857. SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
  2858. O, Ops.size());
  2859. UniqueSCEVs.InsertNode(S, IP);
  2860. return S;
  2861. }
  2862. const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
  2863. const SCEV *RHS) {
2864. // ~smax(~x, ~y) == smin(x, y), because ~z = -1 - z reverses the signed order.
  2865. return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
  2866. }
  2867. const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
  2868. const SCEV *RHS) {
2869. // ~umax(~x, ~y) == umin(x, y), because bitwise-not reverses the unsigned order.
  2870. return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
  2871. }
  2872. const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  2873. // We can bypass creating a target-independent
  2874. // constant expression and then folding it back into a ConstantInt.
  2875. // This is just a compile-time optimization.
  2876. return getConstant(IntTy,
  2877. F->getParent()->getDataLayout().getTypeAllocSize(AllocTy));
  2878. }
  2879. const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
  2880. StructType *STy,
  2881. unsigned FieldNo) {
  2882. // We can bypass creating a target-independent
  2883. // constant expression and then folding it back into a ConstantInt.
  2884. // This is just a compile-time optimization.
  2885. return getConstant(
  2886. IntTy,
  2887. F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
  2888. FieldNo));
  2889. }
  2890. const SCEV *ScalarEvolution::getUnknown(Value *V) {
  2891. // Don't attempt to do anything other than create a SCEVUnknown object
  2892. // here. createSCEV only calls getUnknown after checking for all other
  2893. // interesting possibilities, and any other code that calls getUnknown
  2894. // is doing so in order to hide a value from SCEV canonicalization.
  2895. FoldingSetNodeID ID;
  2896. ID.AddInteger(scUnknown);
  2897. ID.AddPointer(V);
  2898. void *IP = nullptr;
  2899. if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
  2900. assert(cast<SCEVUnknown>(S)->getValue() == V &&
  2901. "Stale SCEVUnknown in uniquing map!");
  2902. return S;
  2903. }
  2904. SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
  2905. FirstUnknown);
  2906. FirstUnknown = cast<SCEVUnknown>(S);
  2907. UniqueSCEVs.InsertNode(S, IP);
  2908. return S;
  2909. }
  2910. //===----------------------------------------------------------------------===//
  2911. // Basic SCEV Analysis and PHI Idiom Recognition Code
  2912. //
  2913. /// isSCEVable - Test if values of the given type are analyzable within
  2914. /// the SCEV framework. This primarily includes integer types, and it
  2915. /// can optionally include pointer types if the ScalarEvolution class
  2916. /// has access to target-specific information.
  2917. bool ScalarEvolution::isSCEVable(Type *Ty) const {
  2918. // Integers and pointers are always SCEVable.
  2919. return Ty->isIntegerTy() || Ty->isPointerTy();
  2920. }
  2921. /// getTypeSizeInBits - Return the size in bits of the specified type,
  2922. /// for which isSCEVable must return true.
  2923. uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  2924. assert(isSCEVable(Ty) && "Type is not SCEVable!");
  2925. return F->getParent()->getDataLayout().getTypeSizeInBits(Ty);
  2926. }
  2927. /// getEffectiveSCEVType - Return a type with the same bitwidth as
  2928. /// the given type and which represents how SCEV will treat the given
  2929. /// type, for which isSCEVable must return true. For pointer types,
  2930. /// this is the pointer-sized integer type.
  2931. Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  2932. assert(isSCEVable(Ty) && "Type is not SCEVable!");
  2933. if (Ty->isIntegerTy()) {
  2934. return Ty;
  2935. }
2936. // The only other supported type is pointer.
  2937. assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  2938. return F->getParent()->getDataLayout().getIntPtrType(Ty);
  2939. }
  2940. const SCEV *ScalarEvolution::getCouldNotCompute() {
  2941. return &CouldNotCompute;
  2942. }
  2943. namespace {
  2944. // Helper class working with SCEVTraversal to figure out if a SCEV contains
  2945. // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
2946. // is set iff we find such a SCEVUnknown.
  2947. //
  2948. struct FindInvalidSCEVUnknown {
  2949. bool FindOne;
  2950. FindInvalidSCEVUnknown() { FindOne = false; }
  2951. bool follow(const SCEV *S) {
  2952. switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  2953. case scConstant:
  2954. return false;
  2955. case scUnknown:
  2956. if (!cast<SCEVUnknown>(S)->getValue())
  2957. FindOne = true;
  2958. return false;
  2959. default:
  2960. return true;
  2961. }
  2962. }
  2963. bool isDone() const { return FindOne; }
  2964. };
  2965. }
  2966. bool ScalarEvolution::checkValidity(const SCEV *S) const {
  2967. FindInvalidSCEVUnknown F;
  2968. SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
  2969. ST.visitAll(S);
  2970. return !F.FindOne;
  2971. }
  2972. /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
  2973. /// expression and create a new one.
  2974. const SCEV *ScalarEvolution::getSCEV(Value *V) {
  2975. assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
  2976. ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  2977. if (I != ValueExprMap.end()) {
  2978. const SCEV *S = I->second;
  2979. if (checkValidity(S))
  2980. return S;
  2981. else
  2982. ValueExprMap.erase(I);
  2983. }
  2984. const SCEV *S = createSCEV(V);
  2985. // The process of creating a SCEV for V may have caused other SCEVs
  2986. // to have been created, so it's necessary to insert the new entry
  2987. // from scratch, rather than trying to remember the insert position
  2988. // above.
  2989. ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  2990. return S;
  2991. }
  2992. /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
  2993. ///
  2994. const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  2995. if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
  2996. return getConstant(
  2997. cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
  2998. Type *Ty = V->getType();
  2999. Ty = getEffectiveSCEVType(Ty);
  3000. return getMulExpr(V,
  3001. getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
  3002. }
  3003. /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
  3004. const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  3005. if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
  3006. return getConstant(
  3007. cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
  3008. Type *Ty = V->getType();
  3009. Ty = getEffectiveSCEVType(Ty);
  3010. const SCEV *AllOnes =
  3011. getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  3012. return getMinusSCEV(AllOnes, V);
  3013. }
  3014. /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
  3015. const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
  3016. SCEV::NoWrapFlags Flags) {
  3017. assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
  3018. // Fast path: X - X --> 0.
  3019. if (LHS == RHS)
  3020. return getConstant(LHS->getType(), 0);
  3021. // X - Y --> X + -Y.
  3022. // X -(nsw || nuw) Y --> X + -Y.
  3023. return getAddExpr(LHS, getNegativeSCEV(RHS));
  3024. }
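// [Editorial illustration, not part of the original source] A minimal usage
// sketch, assuming SE is a ScalarEvolution instance and A, B are SCEVs of the
// same integer type:
//   const SCEV *Diff = SE.getMinusSCEV(A, B); // modeled as A + (-1 * B)
//   const SCEV *Zero = SE.getMinusSCEV(A, A); // fast path folds to constant 0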
  3025. /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
  3026. /// input value to the specified type. If the type must be extended, it is zero
  3027. /// extended.
  3028. const SCEV *
  3029. ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  3030. Type *SrcTy = V->getType();
  3031. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3032. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3033. "Cannot truncate or zero extend with non-integer arguments!");
  3034. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3035. return V; // No conversion
  3036. if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
  3037. return getTruncateExpr(V, Ty);
  3038. return getZeroExtendExpr(V, Ty);
  3039. }
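// [Editorial illustration] How the helper above chooses a cast, assuming SE is
// a ScalarEvolution instance, V is an i32-typed SCEV, and the target types are
// hypothetical:
//   SE.getTruncateOrZeroExtend(V, Int32Ty); // same width -> returned unchanged
//   SE.getTruncateOrZeroExtend(V, Int16Ty); // narrower   -> trunc(V)
//   SE.getTruncateOrZeroExtend(V, Int64Ty); // wider      -> zext(V)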
  3040. /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
  3041. /// input value to the specified type. If the type must be extended, it is sign
  3042. /// extended.
  3043. const SCEV *
  3044. ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
  3045. Type *Ty) {
  3046. Type *SrcTy = V->getType();
  3047. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3048. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3049. "Cannot truncate or zero extend with non-integer arguments!");
  3050. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3051. return V; // No conversion
  3052. if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
  3053. return getTruncateExpr(V, Ty);
  3054. return getSignExtendExpr(V, Ty);
  3055. }
  3056. /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
  3057. /// input value to the specified type. If the type must be extended, it is zero
  3058. /// extended. The conversion must not be narrowing.
  3059. const SCEV *
  3060. ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  3061. Type *SrcTy = V->getType();
  3062. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3063. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3064. "Cannot noop or zero extend with non-integer arguments!");
  3065. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  3066. "getNoopOrZeroExtend cannot truncate!");
  3067. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3068. return V; // No conversion
  3069. return getZeroExtendExpr(V, Ty);
  3070. }
  3071. /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
  3072. /// input value to the specified type. If the type must be extended, it is sign
  3073. /// extended. The conversion must not be narrowing.
  3074. const SCEV *
  3075. ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  3076. Type *SrcTy = V->getType();
  3077. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3078. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3079. "Cannot noop or sign extend with non-integer arguments!");
  3080. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  3081. "getNoopOrSignExtend cannot truncate!");
  3082. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3083. return V; // No conversion
  3084. return getSignExtendExpr(V, Ty);
  3085. }
  3086. /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
  3087. /// the input value to the specified type. If the type must be extended,
  3088. /// it is extended with unspecified bits. The conversion must not be
  3089. /// narrowing.
  3090. const SCEV *
  3091. ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  3092. Type *SrcTy = V->getType();
  3093. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3094. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3095. "Cannot noop or any extend with non-integer arguments!");
  3096. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  3097. "getNoopOrAnyExtend cannot truncate!");
  3098. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3099. return V; // No conversion
  3100. return getAnyExtendExpr(V, Ty);
  3101. }
  3102. /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
  3103. /// input value to the specified type. The conversion must not be widening.
  3104. const SCEV *
  3105. ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  3106. Type *SrcTy = V->getType();
  3107. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  3108. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  3109. "Cannot truncate or noop with non-integer arguments!");
  3110. assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
  3111. "getTruncateOrNoop cannot extend!");
  3112. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  3113. return V; // No conversion
  3114. return getTruncateExpr(V, Ty);
  3115. }
  3116. /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
  3117. /// the types using zero-extension, and then perform a umax operation
  3118. /// with them.
  3119. const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
  3120. const SCEV *RHS) {
  3121. const SCEV *PromotedLHS = LHS;
  3122. const SCEV *PromotedRHS = RHS;
  3123. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
  3124. PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  3125. else
  3126. PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
  3127. return getUMaxExpr(PromotedLHS, PromotedRHS);
  3128. }
  3129. /// getUMinFromMismatchedTypes - Promote the operands to the wider of
  3130. /// the types using zero-extension, and then perform a umin operation
  3131. /// with them.
  3132. const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
  3133. const SCEV *RHS) {
  3134. const SCEV *PromotedLHS = LHS;
  3135. const SCEV *PromotedRHS = RHS;
  3136. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
  3137. PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  3138. else
  3139. PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
  3140. return getUMinExpr(PromotedLHS, PromotedRHS);
  3141. }
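// [Editorial illustration] With a hypothetical i16 LHS and i32 RHS, the
// narrower operand is zero-extended before the min is formed:
//   const SCEV *Min = SE.getUMinFromMismatchedTypes(LHS, RHS);
//   // effectively SE.getUMinExpr(SE.getZeroExtendExpr(LHS, RHS->getType()), RHS)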
  3142. /// getPointerBase - Transitively follow the chain of pointer-type operands
  3143. /// until reaching a SCEV that does not have a single pointer operand. This
  3144. /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
  3145. /// but corner cases do exist.
  3146. const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  3147. // A pointer operand may evaluate to a nonpointer expression, such as null.
  3148. if (!V->getType()->isPointerTy())
  3149. return V;
  3150. if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
  3151. return getPointerBase(Cast->getOperand());
  3152. }
  3153. else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
  3154. const SCEV *PtrOp = nullptr;
  3155. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  3156. I != E; ++I) {
  3157. if ((*I)->getType()->isPointerTy()) {
  3158. // Cannot find the base of an expression with multiple pointer operands.
  3159. if (PtrOp)
  3160. return V;
  3161. PtrOp = *I;
  3162. }
  3163. }
  3164. if (!PtrOp)
  3165. return V;
  3166. return getPointerBase(PtrOp);
  3167. }
  3168. return V;
  3169. }
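// [Editorial illustration] For a GEP-derived expression with a single pointer
// operand, e.g. (%p + 4 * %i) for a hypothetical i32* %p, getPointerBase walks
// back to the SCEVUnknown for %p:
//   const SCEV *PtrExpr = SE.getSCEV(GEPValue);       // hypothetical Value*
//   const SCEV *Base    = SE.getPointerBase(PtrExpr); // SCEVUnknown of %p
// Expressions with zero or multiple pointer operands are returned unchanged.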
  3170. /// PushDefUseChildren - Push users of the given Instruction
  3171. /// onto the given Worklist.
  3172. static void
  3173. PushDefUseChildren(Instruction *I,
  3174. SmallVectorImpl<Instruction *> &Worklist) {
  3175. // Push the def-use children onto the Worklist stack.
  3176. for (User *U : I->users())
  3177. Worklist.push_back(cast<Instruction>(U));
  3178. }
3179. /// ForgetSymbolicName - This looks up computed SCEV values for all
  3180. /// instructions that depend on the given instruction and removes them from
  3181. /// the ValueExprMapType map if they reference SymName. This is used during PHI
  3182. /// resolution.
  3183. void
  3184. ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  3185. SmallVector<Instruction *, 16> Worklist;
  3186. PushDefUseChildren(PN, Worklist);
  3187. SmallPtrSet<Instruction *, 8> Visited;
  3188. Visited.insert(PN);
  3189. while (!Worklist.empty()) {
  3190. Instruction *I = Worklist.pop_back_val();
  3191. if (!Visited.insert(I).second)
  3192. continue;
  3193. ValueExprMapType::iterator It =
  3194. ValueExprMap.find_as(static_cast<Value *>(I));
  3195. if (It != ValueExprMap.end()) {
  3196. const SCEV *Old = It->second;
  3197. // Short-circuit the def-use traversal if the symbolic name
  3198. // ceases to appear in expressions.
  3199. if (Old != SymName && !hasOperand(Old, SymName))
  3200. continue;
  3201. // SCEVUnknown for a PHI either means that it has an unrecognized
3202. // structure, it's a PHI that's in the process of being computed
  3203. // by createNodeForPHI, or it's a single-value PHI. In the first case,
  3204. // additional loop trip count information isn't going to change anything.
  3205. // In the second case, createNodeForPHI will perform the necessary
  3206. // updates on its own when it gets to that point. In the third, we do
  3207. // want to forget the SCEVUnknown.
  3208. if (!isa<PHINode>(I) ||
  3209. !isa<SCEVUnknown>(Old) ||
  3210. (I != PN && Old == SymName)) {
  3211. forgetMemoizedResults(Old);
  3212. ValueExprMap.erase(It);
  3213. }
  3214. }
  3215. PushDefUseChildren(I, Worklist);
  3216. }
  3217. }
  3218. /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
  3219. /// a loop header, making it a potential recurrence, or it doesn't.
  3220. ///
  3221. const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  3222. if (const Loop *L = LI->getLoopFor(PN->getParent()))
  3223. if (L->getHeader() == PN->getParent()) {
  3224. // The loop may have multiple entrances or multiple exits; we can analyze
  3225. // this phi as an addrec if it has a unique entry value and a unique
  3226. // backedge value.
  3227. Value *BEValueV = nullptr, *StartValueV = nullptr;
  3228. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
  3229. Value *V = PN->getIncomingValue(i);
  3230. if (L->contains(PN->getIncomingBlock(i))) {
  3231. if (!BEValueV) {
  3232. BEValueV = V;
  3233. } else if (BEValueV != V) {
  3234. BEValueV = nullptr;
  3235. break;
  3236. }
  3237. } else if (!StartValueV) {
  3238. StartValueV = V;
  3239. } else if (StartValueV != V) {
  3240. StartValueV = nullptr;
  3241. break;
  3242. }
  3243. }
  3244. if (BEValueV && StartValueV) {
  3245. // While we are analyzing this PHI node, handle its value symbolically.
  3246. const SCEV *SymbolicName = getUnknown(PN);
  3247. assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
  3248. "PHI node already processed?");
  3249. ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
  3250. // Using this symbolic name for the PHI, analyze the value coming around
  3251. // the back-edge.
  3252. const SCEV *BEValue = getSCEV(BEValueV);
  3253. // NOTE: If BEValue is loop invariant, we know that the PHI node just
  3254. // has a special value for the first iteration of the loop.
  3255. // If the value coming around the backedge is an add with the symbolic
  3256. // value we just inserted, then we found a simple induction variable!
  3257. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
  3258. // If there is a single occurrence of the symbolic value, replace it
  3259. // with a recurrence.
  3260. unsigned FoundIndex = Add->getNumOperands();
  3261. for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
  3262. if (Add->getOperand(i) == SymbolicName)
  3263. if (FoundIndex == e) {
  3264. FoundIndex = i;
  3265. break;
  3266. }
  3267. if (FoundIndex != Add->getNumOperands()) {
  3268. // Create an add with everything but the specified operand.
  3269. SmallVector<const SCEV *, 8> Ops;
  3270. for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
  3271. if (i != FoundIndex)
  3272. Ops.push_back(Add->getOperand(i));
  3273. const SCEV *Accum = getAddExpr(Ops);
  3274. // This is not a valid addrec if the step amount is varying each
  3275. // loop iteration, but is not itself an addrec in this loop.
  3276. if (isLoopInvariant(Accum, L) ||
  3277. (isa<SCEVAddRecExpr>(Accum) &&
  3278. cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
  3279. SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  3280. // If the increment doesn't overflow, then neither the addrec nor
  3281. // the post-increment will overflow.
  3282. if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
  3283. if (OBO->getOperand(0) == PN) {
  3284. if (OBO->hasNoUnsignedWrap())
  3285. Flags = setFlags(Flags, SCEV::FlagNUW);
  3286. if (OBO->hasNoSignedWrap())
  3287. Flags = setFlags(Flags, SCEV::FlagNSW);
  3288. }
  3289. } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
  3290. // If the increment is an inbounds GEP, then we know the address
  3291. // space cannot be wrapped around. We cannot make any guarantee
  3292. // about signed or unsigned overflow because pointers are
  3293. // unsigned but we may have a negative index from the base
  3294. // pointer. We can guarantee that no unsigned wrap occurs if the
  3295. // indices form a positive value.
  3296. if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
  3297. Flags = setFlags(Flags, SCEV::FlagNW);
  3298. const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
  3299. if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
  3300. Flags = setFlags(Flags, SCEV::FlagNUW);
  3301. }
  3302. // We cannot transfer nuw and nsw flags from subtraction
  3303. // operations -- sub nuw X, Y is not the same as add nuw X, -Y
  3304. // for instance.
  3305. }
  3306. const SCEV *StartVal = getSCEV(StartValueV);
  3307. const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
  3308. // Since the no-wrap flags are on the increment, they apply to the
  3309. // post-incremented value as well.
  3310. if (isLoopInvariant(Accum, L))
  3311. (void)getAddRecExpr(getAddExpr(StartVal, Accum),
  3312. Accum, L, Flags);
  3313. // Okay, for the entire analysis of this edge we assumed the PHI
  3314. // to be symbolic. We now need to go back and purge all of the
  3315. // entries for the scalars that use the symbolic expression.
  3316. ForgetSymbolicName(PN, SymbolicName);
  3317. ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
  3318. return PHISCEV;
  3319. }
  3320. }
  3321. } else if (const SCEVAddRecExpr *AddRec =
  3322. dyn_cast<SCEVAddRecExpr>(BEValue)) {
  3323. // Otherwise, this could be a loop like this:
  3324. // i = 0; for (j = 1; ..; ++j) { .... i = j; }
  3325. // In this case, j = {1,+,1} and BEValue is j.
3326. // Because the other in-value of i (0) fits the evolution of BEValue,
  3327. // i really is an addrec evolution.
  3328. if (AddRec->getLoop() == L && AddRec->isAffine()) {
  3329. const SCEV *StartVal = getSCEV(StartValueV);
  3330. // If StartVal = j.start - j.stride, we can use StartVal as the
  3331. // initial step of the addrec evolution.
  3332. if (StartVal == getMinusSCEV(AddRec->getOperand(0),
  3333. AddRec->getOperand(1))) {
  3334. // FIXME: For constant StartVal, we should be able to infer
  3335. // no-wrap flags.
  3336. const SCEV *PHISCEV =
  3337. getAddRecExpr(StartVal, AddRec->getOperand(1), L,
  3338. SCEV::FlagAnyWrap);
  3339. // Okay, for the entire analysis of this edge we assumed the PHI
  3340. // to be symbolic. We now need to go back and purge all of the
  3341. // entries for the scalars that use the symbolic expression.
  3342. ForgetSymbolicName(PN, SymbolicName);
  3343. ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
  3344. return PHISCEV;
  3345. }
  3346. }
  3347. }
  3348. }
  3349. }
  3350. // If the PHI has a single incoming value, follow that value, unless the
  3351. // PHI's incoming blocks are in a different loop, in which case doing so
  3352. // risks breaking LCSSA form. Instcombine would normally zap these, but
  3353. // it doesn't have DominatorTree information, so it may miss cases.
  3354. if (Value *V =
  3355. SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC))
  3356. if (LI->replacementPreservesLCSSAForm(PN, V))
  3357. return getSCEV(V);
  3358. // If it's not a loop phi, we can't handle it yet.
  3359. return getUnknown(PN);
  3360. }
  3361. /// createNodeForGEP - Expand GEP instructions into add and multiply
  3362. /// operations. This allows them to be analyzed by regular SCEV code.
  3363. ///
  3364. const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  3365. Value *Base = GEP->getOperand(0);
  3366. // Don't attempt to analyze GEPs over unsized objects.
  3367. if (!Base->getType()->getPointerElementType()->isSized())
  3368. return getUnknown(GEP);
  3369. SmallVector<const SCEV *, 4> IndexExprs;
  3370. for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
  3371. IndexExprs.push_back(getSCEV(*Index));
  3372. return getGEPExpr(GEP->getSourceElementType(), getSCEV(Base), IndexExprs,
  3373. GEP->isInBounds());
  3374. }
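// [Editorial illustration] A GEP such as the hypothetical IR
//   %a = getelementptr inbounds i32, i32* %p, i64 %i
// is modeled as the pointer expression (%p + 4 * %i), with 4 being sizeof(i32),
// so the ordinary add/mul folding applies to address arithmetic as well.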
  3375. /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
  3376. /// guaranteed to end in (at every loop iteration). It is, at the same time,
  3377. /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
  3378. /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
  3379. uint32_t
  3380. ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  3381. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  3382. return C->getValue()->getValue().countTrailingZeros();
  3383. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
  3384. return std::min(GetMinTrailingZeros(T->getOperand()),
  3385. (uint32_t)getTypeSizeInBits(T->getType()));
  3386. if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
  3387. uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
  3388. return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
  3389. getTypeSizeInBits(E->getType()) : OpRes;
  3390. }
  3391. if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
  3392. uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
  3393. return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
  3394. getTypeSizeInBits(E->getType()) : OpRes;
  3395. }
  3396. if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
3397. // The result is the min of all operands' results.
  3398. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
  3399. for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
  3400. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
  3401. return MinOpRes;
  3402. }
  3403. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
3404. // The result is the sum of all operands' results.
  3405. uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
  3406. uint32_t BitWidth = getTypeSizeInBits(M->getType());
  3407. for (unsigned i = 1, e = M->getNumOperands();
  3408. SumOpRes != BitWidth && i != e; ++i)
  3409. SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
  3410. BitWidth);
  3411. return SumOpRes;
  3412. }
  3413. if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
3414. // The result is the min of all operands' results.
  3415. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
  3416. for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
  3417. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
  3418. return MinOpRes;
  3419. }
  3420. if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3421. // The result is the min of all operands' results.
  3422. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
  3423. for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
  3424. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
  3425. return MinOpRes;
  3426. }
  3427. if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3428. // The result is the min of all operands' results.
  3429. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
  3430. for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
  3431. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
  3432. return MinOpRes;
  3433. }
  3434. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  3435. // For a SCEVUnknown, ask ValueTracking.
  3436. unsigned BitWidth = getTypeSizeInBits(U->getType());
  3437. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
  3438. computeKnownBits(U->getValue(), Zeros, Ones,
  3439. F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
  3440. return Zeros.countTrailingOnes();
  3441. }
  3442. // SCEVUDivExpr
  3443. return 0;
  3444. }
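// [Editorial illustration] Sample results of GetMinTrailingZeros for
// hypothetical expressions, following the rules above:
//   {4,+,8}            -> 2 (every value in the recurrence is a multiple of 4)
//   (8 * %x)           -> at least 3 (sum of operand results for a multiply)
//   (2 * %x + 4 * %y)  -> at least 1 (minimum over the add operands)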
  3445. /// GetRangeFromMetadata - Helper method to assign a range to V from
  3446. /// metadata present in the IR.
  3447. static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  3448. if (Instruction *I = dyn_cast<Instruction>(V)) {
  3449. if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) {
  3450. ConstantRange TotalRange(
  3451. cast<IntegerType>(I->getType())->getBitWidth(), false);
  3452. unsigned NumRanges = MD->getNumOperands() / 2;
  3453. assert(NumRanges >= 1);
  3454. for (unsigned i = 0; i < NumRanges; ++i) {
  3455. ConstantInt *Lower =
  3456. mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 0));
  3457. ConstantInt *Upper =
  3458. mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 1));
  3459. ConstantRange Range(Lower->getValue(), Upper->getValue());
  3460. TotalRange = TotalRange.unionWith(Range);
  3461. }
  3462. return TotalRange;
  3463. }
  3464. }
  3465. return None;
  3466. }
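// [Editorial illustration] For an instruction carrying !range metadata, e.g.
// the hypothetical IR
//   %v = load i8, i8* %p, !range !0
//   !0 = !{i8 0, i8 10, i8 64, i8 96}
// the helper above unions the half-open ranges [0,10) and [64,96) into one
// ConstantRange, which getRange below intersects into its result.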
  3467. /// getRange - Determine the range for a particular SCEV. If SignHint is
  3468. /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
  3469. /// with a "cleaner" unsigned (resp. signed) representation.
  3470. ///
  3471. ConstantRange
  3472. ScalarEvolution::getRange(const SCEV *S,
  3473. ScalarEvolution::RangeSignHint SignHint) {
  3474. DenseMap<const SCEV *, ConstantRange> &Cache =
  3475. SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
  3476. : SignedRanges;
  3477. // See if we've computed this range already.
  3478. DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  3479. if (I != Cache.end())
  3480. return I->second;
  3481. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  3482. return setRange(C, SignHint, ConstantRange(C->getValue()->getValue()));
  3483. unsigned BitWidth = getTypeSizeInBits(S->getType());
  3484. ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  3485. // If the value has known zeros, the maximum value will have those known zeros
  3486. // as well.
  3487. uint32_t TZ = GetMinTrailingZeros(S);
  3488. if (TZ != 0) {
  3489. if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
  3490. ConservativeResult =
  3491. ConstantRange(APInt::getMinValue(BitWidth),
  3492. APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
  3493. else
  3494. ConservativeResult = ConstantRange(
  3495. APInt::getSignedMinValue(BitWidth),
  3496. APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  3497. }
  3498. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
  3499. ConstantRange X = getRange(Add->getOperand(0), SignHint);
  3500. for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
  3501. X = X.add(getRange(Add->getOperand(i), SignHint));
  3502. return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
  3503. }
  3504. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
  3505. ConstantRange X = getRange(Mul->getOperand(0), SignHint);
  3506. for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
  3507. X = X.multiply(getRange(Mul->getOperand(i), SignHint));
  3508. return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
  3509. }
  3510. if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
  3511. ConstantRange X = getRange(SMax->getOperand(0), SignHint);
  3512. for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
  3513. X = X.smax(getRange(SMax->getOperand(i), SignHint));
  3514. return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
  3515. }
  3516. if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
  3517. ConstantRange X = getRange(UMax->getOperand(0), SignHint);
  3518. for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
  3519. X = X.umax(getRange(UMax->getOperand(i), SignHint));
  3520. return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
  3521. }
  3522. if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
  3523. ConstantRange X = getRange(UDiv->getLHS(), SignHint);
  3524. ConstantRange Y = getRange(UDiv->getRHS(), SignHint);
  3525. return setRange(UDiv, SignHint,
  3526. ConservativeResult.intersectWith(X.udiv(Y)));
  3527. }
  3528. if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
  3529. ConstantRange X = getRange(ZExt->getOperand(), SignHint);
  3530. return setRange(ZExt, SignHint,
  3531. ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  3532. }
  3533. if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
  3534. ConstantRange X = getRange(SExt->getOperand(), SignHint);
  3535. return setRange(SExt, SignHint,
  3536. ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  3537. }
  3538. if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
  3539. ConstantRange X = getRange(Trunc->getOperand(), SignHint);
  3540. return setRange(Trunc, SignHint,
  3541. ConservativeResult.intersectWith(X.truncate(BitWidth)));
  3542. }
  3543. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
  3544. // If there's no unsigned wrap, the value will never be less than its
  3545. // initial value.
  3546. if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
  3547. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
  3548. if (!C->getValue()->isZero())
  3549. ConservativeResult =
  3550. ConservativeResult.intersectWith(
  3551. ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
  3552. // If there's no signed wrap, and all the operands have the same sign or
  3553. // zero, the value won't ever change sign.
  3554. if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
  3555. bool AllNonNeg = true;
  3556. bool AllNonPos = true;
  3557. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
  3558. if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
  3559. if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
  3560. }
  3561. if (AllNonNeg)
  3562. ConservativeResult = ConservativeResult.intersectWith(
  3563. ConstantRange(APInt(BitWidth, 0),
  3564. APInt::getSignedMinValue(BitWidth)));
  3565. else if (AllNonPos)
  3566. ConservativeResult = ConservativeResult.intersectWith(
  3567. ConstantRange(APInt::getSignedMinValue(BitWidth),
  3568. APInt(BitWidth, 1)));
  3569. }
  3570. // TODO: non-affine addrec
  3571. if (AddRec->isAffine()) {
  3572. Type *Ty = AddRec->getType();
  3573. const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
  3574. if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
  3575. getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
  3576. // Check for overflow. This must be done with ConstantRange arithmetic
  3577. // because we could be called from within the ScalarEvolution overflow
  3578. // checking code.
  3579. MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
  3580. ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
  3581. ConstantRange ZExtMaxBECountRange =
  3582. MaxBECountRange.zextOrTrunc(BitWidth * 2 + 1);
  3583. const SCEV *Start = AddRec->getStart();
  3584. const SCEV *Step = AddRec->getStepRecurrence(*this);
  3585. ConstantRange StepSRange = getSignedRange(Step);
  3586. ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2 + 1);
  3587. ConstantRange StartURange = getUnsignedRange(Start);
  3588. ConstantRange EndURange =
  3589. StartURange.add(MaxBECountRange.multiply(StepSRange));
  3590. // Check for unsigned overflow.
  3591. ConstantRange ZExtStartURange =
  3592. StartURange.zextOrTrunc(BitWidth * 2 + 1);
  3593. ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2 + 1);
  3594. if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) ==
  3595. ZExtEndURange) {
  3596. APInt Min = APIntOps::umin(StartURange.getUnsignedMin(),
  3597. EndURange.getUnsignedMin());
  3598. APInt Max = APIntOps::umax(StartURange.getUnsignedMax(),
  3599. EndURange.getUnsignedMax());
  3600. bool IsFullRange = Min.isMinValue() && Max.isMaxValue();
  3601. if (!IsFullRange)
  3602. ConservativeResult =
  3603. ConservativeResult.intersectWith(ConstantRange(Min, Max + 1));
  3604. }
  3605. ConstantRange StartSRange = getSignedRange(Start);
  3606. ConstantRange EndSRange =
  3607. StartSRange.add(MaxBECountRange.multiply(StepSRange));
  3608. // Check for signed overflow. This must be done with ConstantRange
  3609. // arithmetic because we could be called from within the ScalarEvolution
  3610. // overflow checking code.
  3611. ConstantRange SExtStartSRange =
  3612. StartSRange.sextOrTrunc(BitWidth * 2 + 1);
  3613. ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2 + 1);
  3614. if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) ==
  3615. SExtEndSRange) {
  3616. APInt Min = APIntOps::smin(StartSRange.getSignedMin(),
  3617. EndSRange.getSignedMin());
  3618. APInt Max = APIntOps::smax(StartSRange.getSignedMax(),
  3619. EndSRange.getSignedMax());
  3620. bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue();
  3621. if (!IsFullRange)
  3622. ConservativeResult =
  3623. ConservativeResult.intersectWith(ConstantRange(Min, Max + 1));
  3624. }
  3625. }
  3626. }
  3627. return setRange(AddRec, SignHint, ConservativeResult);
  3628. }
  3629. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  3630. // Check if the IR explicitly contains !range metadata.
  3631. Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
  3632. if (MDRange.hasValue())
  3633. ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());
  3634. // Split here to avoid paying the compile-time cost of calling both
  3635. // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
  3636. // if needed.
  3637. const DataLayout &DL = F->getParent()->getDataLayout();
  3638. if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
  3639. // For a SCEVUnknown, ask ValueTracking.
  3640. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
  3641. computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
  3642. if (Ones != ~Zeros + 1)
  3643. ConservativeResult =
  3644. ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
  3645. } else {
  3646. assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
  3647. "generalize as needed!");
  3648. unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
  3649. if (NS > 1)
  3650. ConservativeResult = ConservativeResult.intersectWith(
  3651. ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
  3652. APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
  3653. }
  3654. return setRange(U, SignHint, ConservativeResult);
  3655. }
  3656. return setRange(S, SignHint, ConservativeResult);
  3657. }
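// [Editorial illustration] For a hypothetical affine addrec {0,+,1}<%loop>
// whose maximum backedge-taken count is known to be 9, the ConstantRange
// arithmetic above yields the unsigned range [0, 10): start 0, step 1, at most
// 9 increments, and the double-width overflow check succeeds.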
  3658. /// createSCEV - We know that there is no SCEV for the specified value.
  3659. /// Analyze the expression.
  3660. ///
  3661. const SCEV *ScalarEvolution::createSCEV(Value *V) {
  3662. if (!isSCEVable(V->getType()))
  3663. return getUnknown(V);
  3664. unsigned Opcode = Instruction::UserOp1;
  3665. if (Instruction *I = dyn_cast<Instruction>(V)) {
  3666. Opcode = I->getOpcode();
  3667. // Don't attempt to analyze instructions in blocks that aren't
  3668. // reachable. Such instructions don't matter, and they aren't required
  3669. // to obey basic rules for definitions dominating uses which this
  3670. // analysis depends on.
  3671. if (!DT->isReachableFromEntry(I->getParent()))
  3672. return getUnknown(V);
  3673. } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
  3674. Opcode = CE->getOpcode();
  3675. else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
  3676. return getConstant(CI);
  3677. else if (isa<ConstantPointerNull>(V))
  3678. return getConstant(V->getType(), 0);
  3679. else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
  3680. return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
  3681. else
  3682. return getUnknown(V);
  3683. Operator *U = cast<Operator>(V);
  3684. switch (Opcode) {
  3685. case Instruction::Add: {
  3686. // The simple thing to do would be to just call getSCEV on both operands
  3687. // and call getAddExpr with the result. However if we're looking at a
  3688. // bunch of things all added together, this can be quite inefficient,
  3689. // because it leads to N-1 getAddExpr calls for N ultimate operands.
  3690. // Instead, gather up all the operands and make a single getAddExpr call.
  3691. // LLVM IR canonical form means we need only traverse the left operands.
  3692. //
  3693. // Don't apply this instruction's NSW or NUW flags to the new
  3694. // expression. The instruction may be guarded by control flow that the
  3695. // no-wrap behavior depends on. Non-control-equivalent instructions can be
  3696. // mapped to the same SCEV expression, and it would be incorrect to transfer
  3697. // NSW/NUW semantics to those operations.
  3698. SmallVector<const SCEV *, 4> AddOps;
  3699. AddOps.push_back(getSCEV(U->getOperand(1)));
  3700. for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
  3701. unsigned Opcode = Op->getValueID() - Value::InstructionVal;
  3702. if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
  3703. break;
  3704. U = cast<Operator>(Op);
  3705. const SCEV *Op1 = getSCEV(U->getOperand(1));
  3706. if (Opcode == Instruction::Sub)
  3707. AddOps.push_back(getNegativeSCEV(Op1));
  3708. else
  3709. AddOps.push_back(Op1);
  3710. }
  3711. AddOps.push_back(getSCEV(U->getOperand(0)));
  3712. return getAddExpr(AddOps);
  3713. }
  3714. case Instruction::Mul: {
  3715. // Don't transfer NSW/NUW for the same reason as AddExpr.
  3716. SmallVector<const SCEV *, 4> MulOps;
  3717. MulOps.push_back(getSCEV(U->getOperand(1)));
  3718. for (Value *Op = U->getOperand(0);
  3719. Op->getValueID() == Instruction::Mul + Value::InstructionVal;
  3720. Op = U->getOperand(0)) {
  3721. U = cast<Operator>(Op);
  3722. MulOps.push_back(getSCEV(U->getOperand(1)));
  3723. }
  3724. MulOps.push_back(getSCEV(U->getOperand(0)));
  3725. return getMulExpr(MulOps);
  3726. }
  3727. case Instruction::UDiv:
  3728. return getUDivExpr(getSCEV(U->getOperand(0)),
  3729. getSCEV(U->getOperand(1)));
  3730. case Instruction::Sub:
  3731. return getMinusSCEV(getSCEV(U->getOperand(0)),
  3732. getSCEV(U->getOperand(1)));
  3733. case Instruction::And:
  3734. // For an expression like x&255 that merely masks off the high bits,
  3735. // use zext(trunc(x)) as the SCEV expression.
  3736. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3737. if (CI->isNullValue())
  3738. return getSCEV(U->getOperand(1));
  3739. if (CI->isAllOnesValue())
  3740. return getSCEV(U->getOperand(0));
  3741. const APInt &A = CI->getValue();
  3742. // Instcombine's ShrinkDemandedConstant may strip bits out of
  3743. // constants, obscuring what would otherwise be a low-bits mask.
  3744. // Use computeKnownBits to compute what ShrinkDemandedConstant
  3745. // knew about to reconstruct a low-bits mask value.
  3746. unsigned LZ = A.countLeadingZeros();
  3747. unsigned TZ = A.countTrailingZeros();
  3748. unsigned BitWidth = A.getBitWidth();
  3749. APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  3750. computeKnownBits(U->getOperand(0), KnownZero, KnownOne,
  3751. F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
  3752. APInt EffectiveMask =
  3753. APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
  3754. if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
  3755. const SCEV *MulCount = getConstant(
  3756. ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ)));
  3757. return getMulExpr(
  3758. getZeroExtendExpr(
  3759. getTruncateExpr(
  3760. getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount),
  3761. IntegerType::get(getContext(), BitWidth - LZ - TZ)),
  3762. U->getType()),
  3763. MulCount);
  3764. }
  3765. }
  3766. break;
  3767. case Instruction::Or:
  3768. // If the RHS of the Or is a constant, we may have something like:
  3769. // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
  3770. // optimizations will transparently handle this case.
  3771. //
  3772. // In order for this transformation to be safe, the LHS must be of the
  3773. // form X*(2^n) and the Or constant must be less than 2^n.
  3774. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3775. const SCEV *LHS = getSCEV(U->getOperand(0));
  3776. const APInt &CIVal = CI->getValue();
  3777. if (GetMinTrailingZeros(LHS) >=
  3778. (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
  3779. // Build a plain add SCEV.
  3780. const SCEV *S = getAddExpr(LHS, getSCEV(CI));
  3781. // If the LHS of the add was an addrec and it has no-wrap flags,
  3782. // transfer the no-wrap flags, since an or won't introduce a wrap.
  3783. if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
  3784. const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
  3785. const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
  3786. OldAR->getNoWrapFlags());
  3787. }
  3788. return S;
  3789. }
  3790. }
  3791. break;
  3792. case Instruction::Xor:
  3793. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3794. // If the RHS of the xor is a signbit, then this is just an add.
  3795. // Instcombine turns add of signbit into xor as a strength reduction step.
  3796. if (CI->getValue().isSignBit())
  3797. return getAddExpr(getSCEV(U->getOperand(0)),
  3798. getSCEV(U->getOperand(1)));
  3799. // If the RHS of xor is -1, then this is a not operation.
  3800. if (CI->isAllOnesValue())
  3801. return getNotSCEV(getSCEV(U->getOperand(0)));
  3802. // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
  3803. // This is a variant of the check for xor with -1, and it handles
  3804. // the case where instcombine has trimmed non-demanded bits out
  3805. // of an xor with -1.
  3806. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
  3807. if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
  3808. if (BO->getOpcode() == Instruction::And &&
  3809. LCI->getValue() == CI->getValue())
  3810. if (const SCEVZeroExtendExpr *Z =
  3811. dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
  3812. Type *UTy = U->getType();
  3813. const SCEV *Z0 = Z->getOperand();
  3814. Type *Z0Ty = Z0->getType();
  3815. unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
  3816. // If C is a low-bits mask, the zero extend is serving to
  3817. // mask off the high bits. Complement the operand and
  3818. // re-apply the zext.
  3819. if (APIntOps::isMask(Z0TySize, CI->getValue()))
  3820. return getZeroExtendExpr(getNotSCEV(Z0), UTy);
  3821. // If C is a single bit, it may be in the sign-bit position
  3822. // before the zero-extend. In this case, represent the xor
  3823. // using an add, which is equivalent, and re-apply the zext.
  3824. APInt Trunc = CI->getValue().trunc(Z0TySize);
  3825. if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
  3826. Trunc.isSignBit())
  3827. return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
  3828. UTy);
  3829. }
  3830. }
  3831. break;
  3832. case Instruction::Shl:
  3833. // Turn shift left of a constant amount into a multiply.
  3834. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3835. uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
  3836. // If the shift count is not less than the bitwidth, the result of
  3837. // the shift is undefined. Don't try to analyze it, because the
  3838. // resolution chosen here may differ from the resolution chosen in
  3839. // other parts of the compiler.
  3840. if (SA->getValue().uge(BitWidth))
  3841. break;
  3842. Constant *X = ConstantInt::get(getContext(),
  3843. APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
  3844. return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
  3845. }
  3846. break;
  3847. case Instruction::LShr:
3848. // Turn logical shift right of a constant into an unsigned divide.
  3849. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3850. uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
  3851. // If the shift count is not less than the bitwidth, the result of
  3852. // the shift is undefined. Don't try to analyze it, because the
  3853. // resolution chosen here may differ from the resolution chosen in
  3854. // other parts of the compiler.
  3855. if (SA->getValue().uge(BitWidth))
  3856. break;
  3857. Constant *X = ConstantInt::get(getContext(),
  3858. APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
  3859. return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
  3860. }
  3861. break;
  3862. case Instruction::AShr:
  3863. // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
  3864. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
  3865. if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
  3866. if (L->getOpcode() == Instruction::Shl &&
  3867. L->getOperand(1) == U->getOperand(1)) {
  3868. uint64_t BitWidth = getTypeSizeInBits(U->getType());
  3869. // If the shift count is not less than the bitwidth, the result of
  3870. // the shift is undefined. Don't try to analyze it, because the
  3871. // resolution chosen here may differ from the resolution chosen in
  3872. // other parts of the compiler.
  3873. if (CI->getValue().uge(BitWidth))
  3874. break;
  3875. uint64_t Amt = BitWidth - CI->getZExtValue();
  3876. if (Amt == BitWidth)
  3877. return getSCEV(L->getOperand(0)); // shift by zero --> noop
  3878. return
  3879. getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
  3880. IntegerType::get(getContext(),
  3881. Amt)),
  3882. U->getType());
  3883. }
  3884. break;
  3885. case Instruction::Trunc:
  3886. return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
  3887. case Instruction::ZExt:
  3888. return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  3889. case Instruction::SExt:
  3890. return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  3891. case Instruction::BitCast:
  3892. // BitCasts are no-op casts so we just eliminate the cast.
  3893. if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
  3894. return getSCEV(U->getOperand(0));
  3895. break;
  3896. // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  3897. // lead to pointer expressions which cannot safely be expanded to GEPs,
  3898. // because ScalarEvolution doesn't respect the GEP aliasing rules when
  3899. // simplifying integer expressions.
  3900. case Instruction::GetElementPtr:
  3901. return createNodeForGEP(cast<GEPOperator>(U));
  3902. case Instruction::PHI:
  3903. return createNodeForPHI(cast<PHINode>(U));
  3904. case Instruction::Select:
  3905. // This could be a smax or umax that was lowered earlier.
  3906. // Try to recover it.
  3907. if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
  3908. Value *LHS = ICI->getOperand(0);
  3909. Value *RHS = ICI->getOperand(1);
  3910. switch (ICI->getPredicate()) {
  3911. case ICmpInst::ICMP_SLT:
  3912. case ICmpInst::ICMP_SLE:
  3913. std::swap(LHS, RHS);
  3914. // fall through
  3915. case ICmpInst::ICMP_SGT:
  3916. case ICmpInst::ICMP_SGE:
  3917. // a >s b ? a+x : b+x -> smax(a, b)+x
  3918. // a >s b ? b+x : a+x -> smin(a, b)+x
  3919. if (getTypeSizeInBits(LHS->getType()) <=
  3920. getTypeSizeInBits(U->getType())) {
  3921. const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), U->getType());
  3922. const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), U->getType());
  3923. const SCEV *LA = getSCEV(U->getOperand(1));
  3924. const SCEV *RA = getSCEV(U->getOperand(2));
  3925. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3926. const SCEV *RDiff = getMinusSCEV(RA, RS);
  3927. if (LDiff == RDiff)
  3928. return getAddExpr(getSMaxExpr(LS, RS), LDiff);
  3929. LDiff = getMinusSCEV(LA, RS);
  3930. RDiff = getMinusSCEV(RA, LS);
  3931. if (LDiff == RDiff)
  3932. return getAddExpr(getSMinExpr(LS, RS), LDiff);
  3933. }
  3934. break;
  3935. case ICmpInst::ICMP_ULT:
  3936. case ICmpInst::ICMP_ULE:
  3937. std::swap(LHS, RHS);
  3938. // fall through
  3939. case ICmpInst::ICMP_UGT:
  3940. case ICmpInst::ICMP_UGE:
  3941. // a >u b ? a+x : b+x -> umax(a, b)+x
  3942. // a >u b ? b+x : a+x -> umin(a, b)+x
  3943. if (getTypeSizeInBits(LHS->getType()) <=
  3944. getTypeSizeInBits(U->getType())) {
  3945. const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
  3946. const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), U->getType());
  3947. const SCEV *LA = getSCEV(U->getOperand(1));
  3948. const SCEV *RA = getSCEV(U->getOperand(2));
  3949. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3950. const SCEV *RDiff = getMinusSCEV(RA, RS);
  3951. if (LDiff == RDiff)
  3952. return getAddExpr(getUMaxExpr(LS, RS), LDiff);
  3953. LDiff = getMinusSCEV(LA, RS);
  3954. RDiff = getMinusSCEV(RA, LS);
  3955. if (LDiff == RDiff)
  3956. return getAddExpr(getUMinExpr(LS, RS), LDiff);
  3957. }
  3958. break;
  3959. case ICmpInst::ICMP_NE:
  3960. // n != 0 ? n+x : 1+x -> umax(n, 1)+x
  3961. if (getTypeSizeInBits(LHS->getType()) <=
  3962. getTypeSizeInBits(U->getType()) &&
  3963. isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
  3964. const SCEV *One = getConstant(U->getType(), 1);
  3965. const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
  3966. const SCEV *LA = getSCEV(U->getOperand(1));
  3967. const SCEV *RA = getSCEV(U->getOperand(2));
  3968. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3969. const SCEV *RDiff = getMinusSCEV(RA, One);
  3970. if (LDiff == RDiff)
  3971. return getAddExpr(getUMaxExpr(One, LS), LDiff);
  3972. }
  3973. break;
  3974. case ICmpInst::ICMP_EQ:
  3975. // n == 0 ? 1+x : n+x -> umax(n, 1)+x
  3976. if (getTypeSizeInBits(LHS->getType()) <=
  3977. getTypeSizeInBits(U->getType()) &&
  3978. isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
  3979. const SCEV *One = getConstant(U->getType(), 1);
  3980. const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType());
  3981. const SCEV *LA = getSCEV(U->getOperand(1));
  3982. const SCEV *RA = getSCEV(U->getOperand(2));
  3983. const SCEV *LDiff = getMinusSCEV(LA, One);
  3984. const SCEV *RDiff = getMinusSCEV(RA, LS);
  3985. if (LDiff == RDiff)
  3986. return getAddExpr(getUMaxExpr(One, LS), LDiff);
  3987. }
  3988. break;
  3989. default:
  3990. break;
  3991. }
  3992. }
  3993. default: // We cannot analyze this expression.
  3994. break;
  3995. }
  3996. return getUnknown(V);
  3997. }
  3998. //===----------------------------------------------------------------------===//
  3999. // Iteration Count Computation Code
  4000. //
  4001. unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) {
  4002. if (BasicBlock *ExitingBB = L->getExitingBlock())
  4003. return getSmallConstantTripCount(L, ExitingBB);
  4004. // No trip count information for multiple exits.
  4005. return 0;
  4006. }
  4007. /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
  4008. /// normal unsigned value. Returns 0 if the trip count is unknown or not
  4009. /// constant. Will also return 0 if the maximum trip count is very large (>=
  4010. /// 2^32).
  4011. ///
  4012. /// This "trip count" assumes that control exits via ExitingBlock. More
  4013. /// precisely, it is the number of times that control may reach ExitingBlock
  4014. /// before taking the branch. For loops with multiple exits, it may not be the
4015. /// number of times that the loop header executes because the loop may exit
  4016. /// prematurely via another branch.
  4017. unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
  4018. BasicBlock *ExitingBlock) {
  4019. assert(ExitingBlock && "Must pass a non-null exiting block!");
  4020. assert(L->isLoopExiting(ExitingBlock) &&
  4021. "Exiting block must actually branch out of the loop!");
  4022. const SCEVConstant *ExitCount =
  4023. dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  4024. if (!ExitCount)
  4025. return 0;
  4026. ConstantInt *ExitConst = ExitCount->getValue();
  4027. // Guard against huge trip counts.
  4028. if (ExitConst->getValue().getActiveBits() > 32)
  4029. return 0;
  4030. // In case of integer overflow, this returns 0, which is correct.
  4031. return ((unsigned)ExitConst->getZExtValue()) + 1;
  4032. }
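// [Editorial illustration] If the exit count computed for ExitingBlock is the
// constant 99, the function above returns 100 (exit count plus one); a
// non-constant count, or one with more than 32 active bits, returns 0:
//   unsigned TC = SE.getSmallConstantTripCount(L, ExitingBB); // hypothetical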
  4033. unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) {
  4034. if (BasicBlock *ExitingBB = L->getExitingBlock())
  4035. return getSmallConstantTripMultiple(L, ExitingBB);
  4036. // No trip multiple information for multiple exits.
  4037. return 0;
  4038. }
  4039. /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
  4040. /// trip count of this loop as a normal unsigned value, if possible. This
  4041. /// means that the actual trip count is always a multiple of the returned
  4042. /// value (don't forget the trip count could very well be zero as well!).
  4043. ///
  4044. /// Returns 1 if the trip count is unknown or not guaranteed to be the
  4045. /// multiple of a constant (which is also the case if the trip count is simply
4046. /// constant; use getSmallConstantTripCount for that case). It will also return 1
  4047. /// if the trip count is very large (>= 2^32).
  4048. ///
  4049. /// As explained in the comments for getSmallConstantTripCount, this assumes
  4050. /// that control exits the loop via ExitingBlock.
  4051. unsigned
  4052. ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
  4053. BasicBlock *ExitingBlock) {
  4054. assert(ExitingBlock && "Must pass a non-null exiting block!");
  4055. assert(L->isLoopExiting(ExitingBlock) &&
  4056. "Exiting block must actually branch out of the loop!");
  4057. const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  4058. if (ExitCount == getCouldNotCompute())
  4059. return 1;
  4060. // Get the trip count from the BE count by adding 1.
  4061. const SCEV *TCMul = getAddExpr(ExitCount,
  4062. getConstant(ExitCount->getType(), 1));
  4063. // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
  4064. // to factor simple cases.
  4065. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
  4066. TCMul = Mul->getOperand(0);
  4067. const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
  4068. if (!MulC)
  4069. return 1;
  4070. ConstantInt *Result = MulC->getValue();
  4071. // Guard against huge trip counts (this requires checking
  4072. // for zero to handle the case where the trip count == -1 and the
  4073. // addition wraps).
  4074. if (!Result || Result->getValue().getActiveBits() > 32 ||
  4075. Result->getValue().getActiveBits() == 0)
  4076. return 1;
  4077. return (unsigned)Result->getZExtValue();
  4078. }
  4079. // getExitCount - Get the expression for the number of loop iterations for which
  4080. // this loop is guaranteed not to exit via ExitingBlock. Otherwise return
  4081. // SCEVCouldNotCompute.
  4082. const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
  4083. return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  4084. }
  4085. /// getBackedgeTakenCount - If the specified loop has a predictable
  4086. /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
  4087. /// object. The backedge-taken count is the number of times the loop header
  4088. /// will be branched to from within the loop. This is one less than the
  4089. /// trip count of the loop, since it doesn't count the first iteration,
  4090. /// when the header is branched to from outside the loop.
  4091. ///
  4092. /// Note that it is not valid to call this method on a loop without a
  4093. /// loop-invariant backedge-taken count (see
  4094. /// hasLoopInvariantBackedgeTakenCount).
  4095. ///
  4096. const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  4097. return getBackedgeTakenInfo(L).getExact(this);
  4098. }
  4099. /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
  4100. /// return the least SCEV value that is known never to be less than the
  4101. /// actual backedge taken count.
  4102. const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  4103. return getBackedgeTakenInfo(L).getMax(this);
  4104. }
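// [Editorial illustration] Relationship between the two counts above, assuming
// SE and a loop L with a computable count:
//   const SCEV *Exact = SE.getBackedgeTakenCount(L);    // e.g. (-1 + %n)
//   const SCEV *Max   = SE.getMaxBackedgeTakenCount(L); // conservative upper bound
// The trip count is the backedge-taken count plus one, counting the iteration
// in which the exit branch is finally taken.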
  4105. /// PushLoopPHIs - Push PHI nodes in the header of the given loop
  4106. /// onto the given Worklist.
  4107. static void
  4108. PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  4109. BasicBlock *Header = L->getHeader();
  4110. // Push all Loop-header PHIs onto the Worklist stack.
  4111. for (BasicBlock::iterator I = Header->begin();
  4112. PHINode *PN = dyn_cast<PHINode>(I); ++I)
  4113. Worklist.push_back(PN);
  4114. }
  4115. const ScalarEvolution::BackedgeTakenInfo &
  4116. ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  4117. // Initially insert an invalid entry for this loop. If the insertion
  4118. // succeeds, proceed to actually compute a backedge-taken count and
  4119. // update the value. The temporary CouldNotCompute value tells SCEV
  4120. // code elsewhere that it shouldn't attempt to request a new
  4121. // backedge-taken count, which could result in infinite recursion.
  4122. std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
  4123. BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
  4124. if (!Pair.second)
  4125. return Pair.first->second;
  4126. // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
  4127. // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  4128. // must be cleared in this scope.
  4129. BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
  4130. if (Result.getExact(this) != getCouldNotCompute()) {
  4131. assert(isLoopInvariant(Result.getExact(this), L) &&
  4132. isLoopInvariant(Result.getMax(this), L) &&
  4133. "Computed backedge-taken count isn't loop invariant for loop!");
  4134. ++NumTripCountsComputed;
  4135. }
  4136. else if (Result.getMax(this) == getCouldNotCompute() &&
  4137. isa<PHINode>(L->getHeader()->begin())) {
  4138. // Only count loops that have phi nodes as not being computable.
  4139. ++NumTripCountsNotComputed;
  4140. }
  4141. // Now that we know more about the trip count for this loop, forget any
  4142. // existing SCEV values for PHI nodes in this loop since they are only
  4143. // conservative estimates made without the benefit of trip count
  4144. // information. This is similar to the code in forgetLoop, except that
  4145. // it handles SCEVUnknown PHI nodes specially.
  4146. if (Result.hasAnyInfo()) {
  4147. SmallVector<Instruction *, 16> Worklist;
  4148. PushLoopPHIs(L, Worklist);
  4149. SmallPtrSet<Instruction *, 8> Visited;
  4150. while (!Worklist.empty()) {
  4151. Instruction *I = Worklist.pop_back_val();
  4152. if (!Visited.insert(I).second)
  4153. continue;
  4154. ValueExprMapType::iterator It =
  4155. ValueExprMap.find_as(static_cast<Value *>(I));
  4156. if (It != ValueExprMap.end()) {
  4157. const SCEV *Old = It->second;
  4158. // SCEVUnknown for a PHI either means that it has an unrecognized
  4159. // structure, or it's a PHI that's in the progress of being computed
  4160. // by createNodeForPHI. In the former case, additional loop trip
  4161. // count information isn't going to change anything. In the later
  4162. // case, createNodeForPHI will perform the necessary updates on its
  4163. // own when it gets to that point.
  4164. if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
  4165. forgetMemoizedResults(Old);
  4166. ValueExprMap.erase(It);
  4167. }
  4168. if (PHINode *PN = dyn_cast<PHINode>(I))
  4169. ConstantEvolutionLoopExitValue.erase(PN);
  4170. }
  4171. PushDefUseChildren(I, Worklist);
  4172. }
  4173. }
  4174. // Re-lookup the insert position, since the call to
  4175. // ComputeBackedgeTakenCount above could result in a
4176. // recursive call to getBackedgeTakenInfo (on a different
  4177. // loop), which would invalidate the iterator computed
  4178. // earlier.
  4179. return BackedgeTakenCounts.find(L)->second = Result;
  4180. }
  4181. /// forgetLoop - This method should be called by the client when it has
4182. /// changed a loop in a way that may affect ScalarEvolution's ability to
  4183. /// compute a trip count, or if the loop is deleted.
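///
/// For illustration only (hypothetical client code, not a mandated pattern):
/// a transform that rewrites a loop's exit condition or deletes the loop
/// should drop cached results before re-querying the analysis, e.g.
///
///   SE.forgetLoop(L);                               // drop cached trip counts
///   const SCEV *BTC = SE.getBackedgeTakenCount(L);  // recomputed on demand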
  4184. void ScalarEvolution::forgetLoop(const Loop *L) {
  4185. // Drop any stored trip count value.
  4186. DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
  4187. BackedgeTakenCounts.find(L);
  4188. if (BTCPos != BackedgeTakenCounts.end()) {
  4189. BTCPos->second.clear();
  4190. BackedgeTakenCounts.erase(BTCPos);
  4191. }
  4192. // Drop information about expressions based on loop-header PHIs.
  4193. SmallVector<Instruction *, 16> Worklist;
  4194. PushLoopPHIs(L, Worklist);
  4195. SmallPtrSet<Instruction *, 8> Visited;
  4196. while (!Worklist.empty()) {
  4197. Instruction *I = Worklist.pop_back_val();
  4198. if (!Visited.insert(I).second)
  4199. continue;
  4200. ValueExprMapType::iterator It =
  4201. ValueExprMap.find_as(static_cast<Value *>(I));
  4202. if (It != ValueExprMap.end()) {
  4203. forgetMemoizedResults(It->second);
  4204. ValueExprMap.erase(It);
  4205. if (PHINode *PN = dyn_cast<PHINode>(I))
  4206. ConstantEvolutionLoopExitValue.erase(PN);
  4207. }
  4208. PushDefUseChildren(I, Worklist);
  4209. }
  4210. // Forget all contained loops too, to avoid dangling entries in the
  4211. // ValuesAtScopes map.
  4212. for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
  4213. forgetLoop(*I);
  4214. }
  4215. /// forgetValue - This method should be called by the client when it has
4216. /// changed a value in a way that may affect its value, or which may
  4217. /// disconnect it from a def-use chain linking it to a loop.
  4218. void ScalarEvolution::forgetValue(Value *V) {
  4219. Instruction *I = dyn_cast<Instruction>(V);
  4220. if (!I) return;
  4221. // Drop information about expressions based on loop-header PHIs.
  4222. SmallVector<Instruction *, 16> Worklist;
  4223. Worklist.push_back(I);
  4224. SmallPtrSet<Instruction *, 8> Visited;
  4225. while (!Worklist.empty()) {
  4226. I = Worklist.pop_back_val();
  4227. if (!Visited.insert(I).second)
  4228. continue;
  4229. ValueExprMapType::iterator It =
  4230. ValueExprMap.find_as(static_cast<Value *>(I));
  4231. if (It != ValueExprMap.end()) {
  4232. forgetMemoizedResults(It->second);
  4233. ValueExprMap.erase(It);
  4234. if (PHINode *PN = dyn_cast<PHINode>(I))
  4235. ConstantEvolutionLoopExitValue.erase(PN);
  4236. }
  4237. PushDefUseChildren(I, Worklist);
  4238. }
  4239. }
  4240. /// getExact - Get the exact loop backedge taken count considering all loop
4241. /// exits. A computable result can only be returned for loops with a single exit.
  4242. /// Returning the minimum taken count among all exits is incorrect because one
4243. /// of the loop's exit limits may have been skipped. HowFarToZero assumes that
  4244. /// the limit of each loop test is never skipped. This is a valid assumption as
  4245. /// long as the loop exits via that test. For precise results, it is the
  4246. /// caller's responsibility to specify the relevant loop exit using
  4247. /// getExact(ExitingBlock, SE).
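///
/// For illustration (a sketch): each per-exit count is computed by
/// HowFarToZero/HowManyLessThans under the assumption that the loop really
/// leaves through that test (for example, relying on the tested bound never
/// being stepped over). If a different exit fires first, that limit is
/// "skipped" and may never have been reachable, so combining the exits with
/// umin is not sound in general; an exact count is reported only when every
/// computable exit agrees on the same value.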
  4248. const SCEV *
  4249. ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
  4250. // If any exits were not computable, the loop is not computable.
  4251. if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
  4252. // We need exactly one computable exit.
  4253. if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
  4254. assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
  4255. const SCEV *BECount = nullptr;
  4256. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  4257. ENT != nullptr; ENT = ENT->getNextExit()) {
  4258. assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
  4259. if (!BECount)
  4260. BECount = ENT->ExactNotTaken;
  4261. else if (BECount != ENT->ExactNotTaken)
  4262. return SE->getCouldNotCompute();
  4263. }
  4264. assert(BECount && "Invalid not taken count for loop exit");
  4265. return BECount;
  4266. }
  4267. /// getExact - Get the exact not taken count for this loop exit.
  4268. const SCEV *
  4269. ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
  4270. ScalarEvolution *SE) const {
  4271. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  4272. ENT != nullptr; ENT = ENT->getNextExit()) {
  4273. if (ENT->ExitingBlock == ExitingBlock)
  4274. return ENT->ExactNotTaken;
  4275. }
  4276. return SE->getCouldNotCompute();
  4277. }
  4278. /// getMax - Get the max backedge taken count for the loop.
  4279. const SCEV *
  4280. ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  4281. return Max ? Max : SE->getCouldNotCompute();
  4282. }
  4283. bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
  4284. ScalarEvolution *SE) const {
  4285. if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
  4286. return true;
  4287. if (!ExitNotTaken.ExitingBlock)
  4288. return false;
  4289. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  4290. ENT != nullptr; ENT = ENT->getNextExit()) {
  4291. if (ENT->ExactNotTaken != SE->getCouldNotCompute()
  4292. && SE->hasOperand(ENT->ExactNotTaken, S)) {
  4293. return true;
  4294. }
  4295. }
  4296. return false;
  4297. }
  4298. /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
  4299. /// computable exit into a persistent ExitNotTakenInfo array.
  4300. ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
  4301. SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
  4302. bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
  4303. if (!Complete)
  4304. ExitNotTaken.setIncomplete();
  4305. unsigned NumExits = ExitCounts.size();
  4306. if (NumExits == 0) return;
  4307. ExitNotTaken.ExitingBlock = ExitCounts[0].first;
  4308. ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
  4309. if (NumExits == 1) return;
  4310. // Handle the rare case of multiple computable exits.
  4311. ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
  4312. ExitNotTakenInfo *PrevENT = &ExitNotTaken;
  4313. for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
  4314. PrevENT->setNextExit(ENT);
  4315. ENT->ExitingBlock = ExitCounts[i].first;
  4316. ENT->ExactNotTaken = ExitCounts[i].second;
  4317. }
  4318. }
  4319. /// clear - Invalidate this result and free the ExitNotTakenInfo array.
  4320. void ScalarEvolution::BackedgeTakenInfo::clear() {
  4321. ExitNotTaken.ExitingBlock = nullptr;
  4322. ExitNotTaken.ExactNotTaken = nullptr;
  4323. delete[] ExitNotTaken.getNextExit();
  4324. }
  4325. /// ComputeBackedgeTakenCount - Compute the number of times the backedge
  4326. /// of the specified loop will execute.
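///
/// For illustration (a sketch): with two exiting blocks where only the first
/// dominates the latch,
///
///   for (i = 0; i < n; ++i) {        // must-exit: dominates the latch
///     if (cond(i)) break;            // may-exit: does not dominate the latch
///   }
///
/// the exact count is computable only if every exit's count is, while the max
/// count can be taken as the minimum over the computable must-exits (here,
/// the bound derived from "i < n"), since a must-exit is reached on every
/// iteration that does not leave the loop earlier.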
  4327. ScalarEvolution::BackedgeTakenInfo
  4328. ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
  4329. SmallVector<BasicBlock *, 8> ExitingBlocks;
  4330. L->getExitingBlocks(ExitingBlocks);
  4331. SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
  4332. bool CouldComputeBECount = true;
  4333. BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  4334. const SCEV *MustExitMaxBECount = nullptr;
  4335. const SCEV *MayExitMaxBECount = nullptr;
  4336. // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  4337. // and compute maxBECount.
  4338. for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
  4339. BasicBlock *ExitBB = ExitingBlocks[i];
  4340. ExitLimit EL = ComputeExitLimit(L, ExitBB);
  4341. // 1. For each exit that can be computed, add an entry to ExitCounts.
  4342. // CouldComputeBECount is true only if all exits can be computed.
  4343. if (EL.Exact == getCouldNotCompute())
  4344. // We couldn't compute an exact value for this exit, so
  4345. // we won't be able to compute an exact value for the loop.
  4346. CouldComputeBECount = false;
  4347. else
  4348. ExitCounts.push_back(std::make_pair(ExitBB, EL.Exact));
  4349. // 2. Derive the loop's MaxBECount from each exit's max number of
  4350. // non-exiting iterations. Partition the loop exits into two kinds:
  4351. // LoopMustExits and LoopMayExits.
  4352. //
4353. // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
  4354. // is a LoopMayExit. If any computable LoopMustExit is found, then
  4355. // MaxBECount is the minimum EL.Max of computable LoopMustExits. Otherwise,
  4356. // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is
  4357. // considered greater than any computable EL.Max.
  4358. if (EL.Max != getCouldNotCompute() && Latch &&
  4359. DT->dominates(ExitBB, Latch)) {
  4360. if (!MustExitMaxBECount)
  4361. MustExitMaxBECount = EL.Max;
  4362. else {
  4363. MustExitMaxBECount =
  4364. getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max);
  4365. }
  4366. } else if (MayExitMaxBECount != getCouldNotCompute()) {
  4367. if (!MayExitMaxBECount || EL.Max == getCouldNotCompute())
  4368. MayExitMaxBECount = EL.Max;
  4369. else {
  4370. MayExitMaxBECount =
  4371. getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max);
  4372. }
  4373. }
  4374. }
  4375. const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
  4376. (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  4377. return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
  4378. }
  4379. /// ComputeExitLimit - Compute the number of times the backedge of the specified
  4380. /// loop will execute if it exits via the specified block.
  4381. ScalarEvolution::ExitLimit
  4382. ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
  4383. // Okay, we've chosen an exiting block. See what condition causes us to
  4384. // exit at this block and remember the exit block and whether all other targets
  4385. // lead to the loop header.
  4386. bool MustExecuteLoopHeader = true;
  4387. BasicBlock *Exit = nullptr;
  4388. for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
  4389. SI != SE; ++SI)
  4390. if (!L->contains(*SI)) {
  4391. if (Exit) // Multiple exit successors.
  4392. return getCouldNotCompute();
  4393. Exit = *SI;
  4394. } else if (*SI != L->getHeader()) {
  4395. MustExecuteLoopHeader = false;
  4396. }
  4397. // At this point, we know we have a conditional branch that determines whether
  4398. // the loop is exited. However, we don't know if the branch is executed each
  4399. // time through the loop. If not, then the execution count of the branch will
  4400. // not be equal to the trip count of the loop.
  4401. //
  4402. // Currently we check for this by checking to see if the Exit branch goes to
  4403. // the loop header. If so, we know it will always execute the same number of
4404. // times as the loop. We also handle the case where the exiting block *is* the
  4405. // loop header. This is common for un-rotated loops.
  4406. //
  4407. // If both of those tests fail, walk up the unique predecessor chain to the
  4408. // header, stopping if there is an edge that doesn't exit the loop. If the
  4409. // header is reached, the execution count of the branch will be equal to the
  4410. // trip count of the loop.
  4411. //
  4412. // More extensive analysis could be done to handle more cases here.
  4413. //
  4414. if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) {
  4415. // The simple checks failed, try climbing the unique predecessor chain
  4416. // up to the header.
  4417. bool Ok = false;
  4418. for (BasicBlock *BB = ExitingBlock; BB; ) {
  4419. BasicBlock *Pred = BB->getUniquePredecessor();
  4420. if (!Pred)
  4421. return getCouldNotCompute();
  4422. TerminatorInst *PredTerm = Pred->getTerminator();
  4423. for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
  4424. BasicBlock *PredSucc = PredTerm->getSuccessor(i);
  4425. if (PredSucc == BB)
  4426. continue;
  4427. // If the predecessor has a successor that isn't BB and isn't
  4428. // outside the loop, assume the worst.
  4429. if (L->contains(PredSucc))
  4430. return getCouldNotCompute();
  4431. }
  4432. if (Pred == L->getHeader()) {
  4433. Ok = true;
  4434. break;
  4435. }
  4436. BB = Pred;
  4437. }
  4438. if (!Ok)
  4439. return getCouldNotCompute();
  4440. }
  4441. bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  4442. TerminatorInst *Term = ExitingBlock->getTerminator();
  4443. if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
  4444. assert(BI->isConditional() && "If unconditional, it can't be in loop!");
  4445. // Proceed to the next level to examine the exit condition expression.
  4446. return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0),
  4447. BI->getSuccessor(1),
  4448. /*ControlsExit=*/IsOnlyExit);
  4449. }
  4450. if (SwitchInst *SI = dyn_cast<SwitchInst>(Term))
  4451. return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit,
  4452. /*ControlsExit=*/IsOnlyExit);
  4453. return getCouldNotCompute();
  4454. }
  4455. /// ComputeExitLimitFromCond - Compute the number of times the
  4456. /// backedge of the specified loop will execute if its exit condition
  4457. /// were a conditional branch of ExitCond, TBB, and FBB.
  4458. ///
  4459. /// @param ControlsExit is true if ExitCond directly controls the exit
  4460. /// branch. In this case, we can assume that the loop exits only if the
  4461. /// condition is true and can infer that failing to meet the condition prior to
  4462. /// integer wraparound results in undefined behavior.
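///
/// For illustration (a sketch): an exit branch of the form
///
///   %c = and i1 %c0, %c1
///   br i1 %c, label %body, label %exit
///
/// keeps looping only while both %c0 and %c1 hold, so when either
/// sub-condition may cause the exit the exact and max counts are combined
/// with umin of the two sub-limits, and ControlsExit is dropped in the
/// recursive calls because neither sub-condition alone controls the exit.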
  4463. ScalarEvolution::ExitLimit
  4464. ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
  4465. Value *ExitCond,
  4466. BasicBlock *TBB,
  4467. BasicBlock *FBB,
  4468. bool ControlsExit) {
  4469. // Check if the controlling expression for this loop is an And or Or.
  4470. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
  4471. if (BO->getOpcode() == Instruction::And) {
  4472. // Recurse on the operands of the and.
  4473. bool EitherMayExit = L->contains(TBB);
  4474. ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
  4475. ControlsExit && !EitherMayExit);
  4476. ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
  4477. ControlsExit && !EitherMayExit);
  4478. const SCEV *BECount = getCouldNotCompute();
  4479. const SCEV *MaxBECount = getCouldNotCompute();
  4480. if (EitherMayExit) {
  4481. // Both conditions must be true for the loop to continue executing.
  4482. // Choose the less conservative count.
  4483. if (EL0.Exact == getCouldNotCompute() ||
  4484. EL1.Exact == getCouldNotCompute())
  4485. BECount = getCouldNotCompute();
  4486. else
  4487. BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
  4488. if (EL0.Max == getCouldNotCompute())
  4489. MaxBECount = EL1.Max;
  4490. else if (EL1.Max == getCouldNotCompute())
  4491. MaxBECount = EL0.Max;
  4492. else
  4493. MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
  4494. } else {
  4495. // Both conditions must be true at the same time for the loop to exit.
  4496. // For now, be conservative.
  4497. assert(L->contains(FBB) && "Loop block has no successor in loop!");
  4498. if (EL0.Max == EL1.Max)
  4499. MaxBECount = EL0.Max;
  4500. if (EL0.Exact == EL1.Exact)
  4501. BECount = EL0.Exact;
  4502. }
  4503. return ExitLimit(BECount, MaxBECount);
  4504. }
  4505. if (BO->getOpcode() == Instruction::Or) {
  4506. // Recurse on the operands of the or.
  4507. bool EitherMayExit = L->contains(FBB);
  4508. ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
  4509. ControlsExit && !EitherMayExit);
  4510. ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
  4511. ControlsExit && !EitherMayExit);
  4512. const SCEV *BECount = getCouldNotCompute();
  4513. const SCEV *MaxBECount = getCouldNotCompute();
  4514. if (EitherMayExit) {
  4515. // Both conditions must be false for the loop to continue executing.
  4516. // Choose the less conservative count.
  4517. if (EL0.Exact == getCouldNotCompute() ||
  4518. EL1.Exact == getCouldNotCompute())
  4519. BECount = getCouldNotCompute();
  4520. else
  4521. BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
  4522. if (EL0.Max == getCouldNotCompute())
  4523. MaxBECount = EL1.Max;
  4524. else if (EL1.Max == getCouldNotCompute())
  4525. MaxBECount = EL0.Max;
  4526. else
  4527. MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
  4528. } else {
  4529. // Both conditions must be false at the same time for the loop to exit.
  4530. // For now, be conservative.
  4531. assert(L->contains(TBB) && "Loop block has no successor in loop!");
  4532. if (EL0.Max == EL1.Max)
  4533. MaxBECount = EL0.Max;
  4534. if (EL0.Exact == EL1.Exact)
  4535. BECount = EL0.Exact;
  4536. }
  4537. return ExitLimit(BECount, MaxBECount);
  4538. }
  4539. }
  4540. // With an icmp, it may be feasible to compute an exact backedge-taken count.
  4541. // Proceed to the next level to examine the icmp.
  4542. if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
  4543. return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit);
  4544. // Check for a constant condition. These are normally stripped out by
  4545. // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  4546. // preserve the CFG and is temporarily leaving constant conditions
  4547. // in place.
  4548. if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
  4549. if (L->contains(FBB) == !CI->getZExtValue())
  4550. // The backedge is always taken.
  4551. return getCouldNotCompute();
  4552. else
  4553. // The backedge is never taken.
  4554. return getConstant(CI->getType(), 0);
  4555. }
  4556. // If it's not an integer or pointer comparison then compute it the hard way.
  4557. return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
  4558. }
  4559. /// ComputeExitLimitFromICmp - Compute the number of times the
  4560. /// backedge of the specified loop will execute if its exit condition
  4561. /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
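///
/// For illustration (a sketch of the dispatch below): after the predicate is
/// normalized so that the loop exits when the condition becomes false, a test
/// such as "while (X != Y)" is handled as HowFarToZero(X - Y), "while (X < Y)"
/// as HowManyLessThans(X, Y, ...), and a comparison of an AddRec against a
/// constant may instead be answered directly from the AddRec's value range.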
  4562. ScalarEvolution::ExitLimit
  4563. ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
  4564. ICmpInst *ExitCond,
  4565. BasicBlock *TBB,
  4566. BasicBlock *FBB,
  4567. bool ControlsExit) {
  4568. // If the condition was exit on true, convert the condition to exit on false
  4569. ICmpInst::Predicate Cond;
  4570. if (!L->contains(FBB))
  4571. Cond = ExitCond->getPredicate();
  4572. else
  4573. Cond = ExitCond->getInversePredicate();
  4574. // Handle common loops like: for (X = "string"; *X; ++X)
  4575. if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
  4576. if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
  4577. ExitLimit ItCnt =
  4578. ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
  4579. if (ItCnt.hasAnyInfo())
  4580. return ItCnt;
  4581. }
  4582. const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  4583. const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
  4584. // Try to evaluate any dependencies out of the loop.
  4585. LHS = getSCEVAtScope(LHS, L);
  4586. RHS = getSCEVAtScope(RHS, L);
  4587. // At this point, we would like to compute how many iterations of the
  4588. // loop the predicate will return true for these inputs.
  4589. if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
4590. // If there is a loop-invariant operand, force it into the RHS.
  4591. std::swap(LHS, RHS);
  4592. Cond = ICmpInst::getSwappedPredicate(Cond);
  4593. }
  4594. // Simplify the operands before analyzing them.
  4595. (void)SimplifyICmpOperands(Cond, LHS, RHS);
  4596. // If we have a comparison of a chrec against a constant, try to use value
  4597. // ranges to answer this query.
  4598. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
  4599. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
  4600. if (AddRec->getLoop() == L) {
  4601. // Form the constant range.
  4602. ConstantRange CompRange(
  4603. ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
  4604. const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
  4605. if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
  4606. }
  4607. switch (Cond) {
  4608. case ICmpInst::ICMP_NE: { // while (X != Y)
  4609. // Convert to: while (X-Y != 0)
  4610. ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  4611. if (EL.hasAnyInfo()) return EL;
  4612. break;
  4613. }
  4614. case ICmpInst::ICMP_EQ: { // while (X == Y)
  4615. // Convert to: while (X-Y == 0)
  4616. ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
  4617. if (EL.hasAnyInfo()) return EL;
  4618. break;
  4619. }
  4620. case ICmpInst::ICMP_SLT:
  4621. case ICmpInst::ICMP_ULT: { // while (X < Y)
  4622. bool IsSigned = Cond == ICmpInst::ICMP_SLT;
  4623. ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, ControlsExit);
  4624. if (EL.hasAnyInfo()) return EL;
  4625. break;
  4626. }
  4627. case ICmpInst::ICMP_SGT:
  4628. case ICmpInst::ICMP_UGT: { // while (X > Y)
  4629. bool IsSigned = Cond == ICmpInst::ICMP_SGT;
  4630. ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit);
  4631. if (EL.hasAnyInfo()) return EL;
  4632. break;
  4633. }
  4634. default:
  4635. #if 0
  4636. dbgs() << "ComputeBackedgeTakenCount ";
  4637. if (ExitCond->getOperand(0)->getType()->isUnsigned())
  4638. dbgs() << "[unsigned] ";
  4639. dbgs() << *LHS << " "
  4640. << Instruction::getOpcodeName(Instruction::ICmp)
  4641. << " " << *RHS << "\n";
  4642. #endif
  4643. break;
  4644. }
  4645. return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
  4646. }
  4647. ScalarEvolution::ExitLimit
  4648. ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L,
  4649. SwitchInst *Switch,
  4650. BasicBlock *ExitingBlock,
  4651. bool ControlsExit) {
  4652. assert(!L->contains(ExitingBlock) && "Not an exiting block!");
  4653. // Give up if the exit is the default dest of a switch.
  4654. if (Switch->getDefaultDest() == ExitingBlock)
  4655. return getCouldNotCompute();
  4656. assert(L->contains(Switch->getDefaultDest()) &&
  4657. "Default case must not exit the loop!");
  4658. const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  4659. const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
  4660. // while (X != Y) --> while (X-Y != 0)
  4661. ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  4662. if (EL.hasAnyInfo())
  4663. return EL;
  4664. return getCouldNotCompute();
  4665. }
  4666. static ConstantInt *
  4667. EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
  4668. ScalarEvolution &SE) {
  4669. const SCEV *InVal = SE.getConstant(C);
  4670. const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  4671. assert(isa<SCEVConstant>(Val) &&
  4672. "Evaluation of SCEV at constant didn't fold correctly?");
  4673. return cast<SCEVConstant>(Val)->getValue();
  4674. }
  4675. /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
  4676. /// 'icmp op load X, cst', try to see if we can compute the backedge
  4677. /// execution count.
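///
/// For illustration (a sketch; the exact IR depends on the front end): for
///
///   static const char S[] = "hello";
///   for (i = 0; S[i] != 0; ++i) { ... }
///
/// the exit test is an icmp of 'load (gep @S, 0, {0,+,1})' against 0. The
/// loop below plugs in iteration numbers 0, 1, 2, ..., folds the load through
/// the constant initializer, and returns the first iteration at which the
/// comparison fails, giving a small constant backedge-taken count.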
  4678. ScalarEvolution::ExitLimit
  4679. ScalarEvolution::ComputeLoadConstantCompareExitLimit(
  4680. LoadInst *LI,
  4681. Constant *RHS,
  4682. const Loop *L,
  4683. ICmpInst::Predicate predicate) {
  4684. if (LI->isVolatile()) return getCouldNotCompute();
  4685. // Check to see if the loaded pointer is a getelementptr of a global.
  4686. // TODO: Use SCEV instead of manually grubbing with GEPs.
  4687. GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  4688. if (!GEP) return getCouldNotCompute();
  4689. // Make sure that it is really a constant global we are gepping, with an
  4690. // initializer, and make sure the first IDX is really 0.
  4691. GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  4692. if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
  4693. GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
  4694. !cast<Constant>(GEP->getOperand(1))->isNullValue())
  4695. return getCouldNotCompute();
  4696. // Okay, we allow one non-constant index into the GEP instruction.
  4697. Value *VarIdx = nullptr;
  4698. std::vector<Constant*> Indexes;
  4699. unsigned VarIdxNum = 0;
  4700. for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
  4701. if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
  4702. Indexes.push_back(CI);
  4703. } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
  4704. if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
  4705. VarIdx = GEP->getOperand(i);
  4706. VarIdxNum = i-2;
  4707. Indexes.push_back(nullptr);
  4708. }
  4709. // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  4710. if (!VarIdx)
  4711. return getCouldNotCompute();
  4712. // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  4713. // Check to see if X is a loop variant variable value now.
  4714. const SCEV *Idx = getSCEV(VarIdx);
  4715. Idx = getSCEVAtScope(Idx, L);
  4716. // We can only recognize very limited forms of loop index expressions, in
  4717. // particular, only affine AddRec's like {C1,+,C2}.
  4718. const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  4719. if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
  4720. !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
  4721. !isa<SCEVConstant>(IdxExpr->getOperand(1)))
  4722. return getCouldNotCompute();
  4723. unsigned MaxSteps = MaxBruteForceIterations;
  4724. for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
  4725. ConstantInt *ItCst = ConstantInt::get(
  4726. cast<IntegerType>(IdxExpr->getType()), IterationNum);
  4727. ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
  4728. // Form the GEP offset.
  4729. Indexes[VarIdxNum] = Val;
  4730. Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
  4731. Indexes);
  4732. if (!Result) break; // Cannot compute!
  4733. // Evaluate the condition for this iteration.
  4734. Result = ConstantExpr::getICmp(predicate, Result, RHS);
  4735. if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
  4736. if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
  4737. #if 0
  4738. dbgs() << "\n***\n*** Computed loop count " << *ItCst
  4739. << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
  4740. << "***\n";
  4741. #endif
  4742. ++NumArrayLenItCounts;
  4743. return getConstant(ItCst); // Found terminating iteration!
  4744. }
  4745. }
  4746. return getCouldNotCompute();
  4747. }
  4748. /// CanConstantFold - Return true if we can constant fold an instruction of the
  4749. /// specified type, assuming that all operands were constants.
  4750. static bool CanConstantFold(const Instruction *I) {
  4751. if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
  4752. isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
  4753. isa<LoadInst>(I))
  4754. return true;
  4755. if (const CallInst *CI = dyn_cast<CallInst>(I))
  4756. if (const Function *F = CI->getCalledFunction())
  4757. return canConstantFoldCallTo(F);
  4758. return false;
  4759. }
  4760. /// Determine whether this instruction can constant evolve within this loop
  4761. /// assuming its operands can all constant evolve.
  4762. static bool canConstantEvolve(Instruction *I, const Loop *L) {
  4763. // An instruction outside of the loop can't be derived from a loop PHI.
  4764. if (!L->contains(I)) return false;
  4765. if (isa<PHINode>(I)) {
  4766. // We don't currently keep track of the control flow needed to evaluate
  4767. // PHIs, so we cannot handle PHIs inside of loops.
  4768. return L->getHeader() == I->getParent();
  4769. }
  4770. // If we won't be able to constant fold this expression even if the operands
  4771. // are constants, bail early.
  4772. return CanConstantFold(I);
  4773. }
  4774. /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
  4775. /// recursing through each instruction operand until reaching a loop header phi.
  4776. static PHINode *
  4777. getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
  4778. DenseMap<Instruction *, PHINode *> &PHIMap) {
  4779. // Otherwise, we can evaluate this instruction if all of its operands are
  4780. // constant or derived from a PHI node themselves.
  4781. PHINode *PHI = nullptr;
  4782. for (Instruction::op_iterator OpI = UseInst->op_begin(),
  4783. OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
  4784. if (isa<Constant>(*OpI)) continue;
  4785. Instruction *OpInst = dyn_cast<Instruction>(*OpI);
  4786. if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
  4787. PHINode *P = dyn_cast<PHINode>(OpInst);
  4788. if (!P)
  4789. // If this operand is already visited, reuse the prior result.
  4790. // We may have P != PHI if this is the deepest point at which the
  4791. // inconsistent paths meet.
  4792. P = PHIMap.lookup(OpInst);
  4793. if (!P) {
  4794. // Recurse and memoize the results, whether a phi is found or not.
  4795. // This recursive call invalidates pointers into PHIMap.
  4796. P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
  4797. PHIMap[OpInst] = P;
  4798. }
  4799. if (!P)
  4800. return nullptr; // Not evolving from PHI
  4801. if (PHI && PHI != P)
  4802. return nullptr; // Evolving from multiple different PHIs.
  4803. PHI = P;
  4804. }
4805. // This is an expression evolving from a constant PHI!
  4806. return PHI;
  4807. }
  4808. /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
  4809. /// in the loop that V is derived from. We allow arbitrary operations along the
  4810. /// way, but the operands of an operation must either be constants or a value
  4811. /// derived from a constant PHI. If this expression does not fit with these
  4812. /// constraints, return null.
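///
/// For illustration (a sketch): in
///
///   %i = phi i32 [ 3, %entry ], [ %i.next, %loop ]
///   %i.next = mul i32 %i, 2
///   %c = icmp ult i32 %i.next, 100
///
/// getConstantEvolvingPHI(%c, L) returns %i: every operand on the path from
/// %c back to a loop-header PHI is either a constant or derived from that
/// single PHI, so the condition can be evaluated one iteration at a time.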
  4813. static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  4814. Instruction *I = dyn_cast<Instruction>(V);
  4815. if (!I || !canConstantEvolve(I, L)) return nullptr;
  4816. if (PHINode *PN = dyn_cast<PHINode>(I)) {
  4817. return PN;
  4818. }
  4819. // Record non-constant instructions contained by the loop.
  4820. DenseMap<Instruction *, PHINode *> PHIMap;
  4821. return getConstantEvolvingPHIOperands(I, L, PHIMap);
  4822. }
  4823. /// EvaluateExpression - Given an expression that passes the
  4824. /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
  4825. /// in the loop has the value PHIVal. If we can't fold this expression for some
  4826. /// reason, return null.
  4827. static Constant *EvaluateExpression(Value *V, const Loop *L,
  4828. DenseMap<Instruction *, Constant *> &Vals,
  4829. const DataLayout &DL,
  4830. const TargetLibraryInfo *TLI) {
  4831. // Convenient constant check, but redundant for recursive calls.
  4832. if (Constant *C = dyn_cast<Constant>(V)) return C;
  4833. Instruction *I = dyn_cast<Instruction>(V);
  4834. if (!I) return nullptr;
  4835. if (Constant *C = Vals.lookup(I)) return C;
  4836. // An instruction inside the loop depends on a value outside the loop that we
  4837. // weren't given a mapping for, or a value such as a call inside the loop.
  4838. if (!canConstantEvolve(I, L)) return nullptr;
  4839. // An unmapped PHI can be due to a branch or another loop inside this loop,
  4840. // or due to this not being the initial iteration through a loop where we
  4841. // couldn't compute the evolution of this particular PHI last time.
  4842. if (isa<PHINode>(I)) return nullptr;
  4843. std::vector<Constant*> Operands(I->getNumOperands());
  4844. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
  4845. Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
  4846. if (!Operand) {
  4847. Operands[i] = dyn_cast<Constant>(I->getOperand(i));
  4848. if (!Operands[i]) return nullptr;
  4849. continue;
  4850. }
  4851. Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
  4852. Vals[Operand] = C;
  4853. if (!C) return nullptr;
  4854. Operands[i] = C;
  4855. }
  4856. if (CmpInst *CI = dyn_cast<CmpInst>(I))
  4857. return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
  4858. Operands[1], DL, TLI);
  4859. if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
  4860. if (!LI->isVolatile())
  4861. return ConstantFoldLoadFromConstPtr(Operands[0], DL);
  4862. }
  4863. return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
  4864. TLI);
  4865. }
  4866. /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
  4867. /// in the header of its containing loop, we know the loop executes a
  4868. /// constant number of times, and the PHI node is just a recurrence
  4869. /// involving constants, fold it.
  4870. Constant *
  4871. ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
  4872. const APInt &BEs,
  4873. const Loop *L) {
  4874. DenseMap<PHINode*, Constant*>::const_iterator I =
  4875. ConstantEvolutionLoopExitValue.find(PN);
  4876. if (I != ConstantEvolutionLoopExitValue.end())
  4877. return I->second;
  4878. if (BEs.ugt(MaxBruteForceIterations))
  4879. return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
  4880. Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
  4881. DenseMap<Instruction *, Constant *> CurrentIterVals;
  4882. BasicBlock *Header = L->getHeader();
  4883. assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
  4884. // Since the loop is canonicalized, the PHI node must have two entries. One
  4885. // entry must be a constant (coming in from outside of the loop), and the
  4886. // second must be derived from the same PHI.
  4887. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  4888. PHINode *PHI = nullptr;
  4889. for (BasicBlock::iterator I = Header->begin();
  4890. (PHI = dyn_cast<PHINode>(I)); ++I) {
  4891. Constant *StartCST =
  4892. dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
  4893. if (!StartCST) continue;
  4894. CurrentIterVals[PHI] = StartCST;
  4895. }
  4896. if (!CurrentIterVals.count(PN))
  4897. return RetVal = nullptr;
  4898. Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  4899. // Execute the loop symbolically to determine the exit value.
  4900. if (BEs.getActiveBits() >= 32)
  4901. return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!
  4902. unsigned NumIterations = BEs.getZExtValue(); // must be in range
  4903. unsigned IterationNum = 0;
  4904. const DataLayout &DL = F->getParent()->getDataLayout();
  4905. for (; ; ++IterationNum) {
  4906. if (IterationNum == NumIterations)
  4907. return RetVal = CurrentIterVals[PN]; // Got exit value!
  4908. // Compute the value of the PHIs for the next iteration.
  4909. // EvaluateExpression adds non-phi values to the CurrentIterVals map.
  4910. DenseMap<Instruction *, Constant *> NextIterVals;
  4911. Constant *NextPHI =
  4912. EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
  4913. if (!NextPHI)
  4914. return nullptr; // Couldn't evaluate!
  4915. NextIterVals[PN] = NextPHI;
  4916. bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
  4917. // Also evaluate the other PHI nodes. However, we don't get to stop if we
  4918. // cease to be able to evaluate one of them or if they stop evolving,
  4919. // because that doesn't necessarily prevent us from computing PN.
  4920. SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
  4921. for (DenseMap<Instruction *, Constant *>::const_iterator
  4922. I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
  4923. PHINode *PHI = dyn_cast<PHINode>(I->first);
  4924. if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
  4925. PHIsToCompute.push_back(std::make_pair(PHI, I->second));
  4926. }
  4927. // We use two distinct loops because EvaluateExpression may invalidate any
  4928. // iterators into CurrentIterVals.
  4929. for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
  4930. I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
  4931. PHINode *PHI = I->first;
  4932. Constant *&NextPHI = NextIterVals[PHI];
  4933. if (!NextPHI) { // Not already computed.
  4934. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
  4935. NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
  4936. }
  4937. if (NextPHI != I->second)
  4938. StoppedEvolving = false;
  4939. }
  4940. // If all entries in CurrentIterVals == NextIterVals then we can stop
  4941. // iterating, the loop can't continue to change.
  4942. if (StoppedEvolving)
  4943. return RetVal = CurrentIterVals[PN];
  4944. CurrentIterVals.swap(NextIterVals);
  4945. }
  4946. }
  4947. /// ComputeExitCountExhaustively - If the loop is known to execute a
  4948. /// constant number of times (the condition evolves only from constants),
  4949. /// try to evaluate a few iterations of the loop until we get the exit
  4950. /// condition gets a value of ExitWhen (true or false). If we cannot
  4951. /// evaluate the trip count of the loop, return getCouldNotCompute().
  4952. const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
  4953. Value *Cond,
  4954. bool ExitWhen) {
  4955. PHINode *PN = getConstantEvolvingPHI(Cond, L);
  4956. if (!PN) return getCouldNotCompute();
  4957. // If the loop is canonicalized, the PHI will have exactly two entries.
  4958. // That's the only form we support here.
  4959. if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
  4960. DenseMap<Instruction *, Constant *> CurrentIterVals;
  4961. BasicBlock *Header = L->getHeader();
  4962. assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
  4963. // One entry must be a constant (coming in from outside of the loop), and the
  4964. // second must be derived from the same PHI.
  4965. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  4966. PHINode *PHI = nullptr;
  4967. for (BasicBlock::iterator I = Header->begin();
  4968. (PHI = dyn_cast<PHINode>(I)); ++I) {
  4969. Constant *StartCST =
  4970. dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
  4971. if (!StartCST) continue;
  4972. CurrentIterVals[PHI] = StartCST;
  4973. }
  4974. if (!CurrentIterVals.count(PN))
  4975. return getCouldNotCompute();
4976. // Okay, we found a PHI node that defines the trip count of this loop. Execute
  4977. // the loop symbolically to determine when the condition gets a value of
  4978. // "ExitWhen".
  4979. unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  4980. const DataLayout &DL = F->getParent()->getDataLayout();
  4981. for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
  4982. ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>(
  4983. EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI));
  4984. // Couldn't symbolically evaluate.
  4985. if (!CondVal) return getCouldNotCompute();
  4986. if (CondVal->getValue() == uint64_t(ExitWhen)) {
  4987. ++NumBruteForceTripCountsComputed;
  4988. return getConstant(Type::getInt32Ty(getContext()), IterationNum);
  4989. }
  4990. // Update all the PHI nodes for the next iteration.
  4991. DenseMap<Instruction *, Constant *> NextIterVals;
  4992. // Create a list of which PHIs we need to compute. We want to do this before
  4993. // calling EvaluateExpression on them because that may invalidate iterators
  4994. // into CurrentIterVals.
  4995. SmallVector<PHINode *, 8> PHIsToCompute;
  4996. for (DenseMap<Instruction *, Constant *>::const_iterator
  4997. I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
  4998. PHINode *PHI = dyn_cast<PHINode>(I->first);
  4999. if (!PHI || PHI->getParent() != Header) continue;
  5000. PHIsToCompute.push_back(PHI);
  5001. }
  5002. for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
  5003. E = PHIsToCompute.end(); I != E; ++I) {
  5004. PHINode *PHI = *I;
  5005. Constant *&NextPHI = NextIterVals[PHI];
  5006. if (NextPHI) continue; // Already computed!
  5007. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
  5008. NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
  5009. }
  5010. CurrentIterVals.swap(NextIterVals);
  5011. }
  5012. // Too many iterations were needed to evaluate.
  5013. return getCouldNotCompute();
  5014. }
  5015. /// getSCEVAtScope - Return a SCEV expression for the specified value
  5016. /// at the specified scope in the program. The L value specifies a loop
5017. /// nest to evaluate the expression at: null denotes the top-level scope,
5018. /// and a non-null loop denotes the scope immediately inside that loop.
  5019. ///
  5020. /// This method can be used to compute the exit value for a variable defined
  5021. /// in a loop by querying what the value will hold in the parent loop.
  5022. ///
  5023. /// In the case that a relevant loop exit value cannot be computed, the
  5024. /// original value V is returned.
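///
/// For illustration (a sketch): if a value is {0,+,1}<%inner> and the inner
/// loop's backedge-taken count is n, then evaluating it at the scope of the
/// parent loop yields the recurrence evaluated at its final iteration, i.e.
/// n, whereas evaluating it at the scope of %inner returns the recurrence
/// unchanged.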
  5025. const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  5026. // Check to see if we've folded this expression at this loop before.
  5027. SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V];
  5028. for (unsigned u = 0; u < Values.size(); u++) {
  5029. if (Values[u].first == L)
  5030. return Values[u].second ? Values[u].second : V;
  5031. }
  5032. Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
  5033. // Otherwise compute it.
  5034. const SCEV *C = computeSCEVAtScope(V, L);
  5035. SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
  5036. for (unsigned u = Values2.size(); u > 0; u--) {
  5037. if (Values2[u - 1].first == L) {
  5038. Values2[u - 1].second = C;
  5039. break;
  5040. }
  5041. }
  5042. return C;
  5043. }
  5044. /// This builds up a Constant using the ConstantExpr interface. That way, we
  5045. /// will return Constants for objects which aren't represented by a
  5046. /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
  5047. /// Returns NULL if the SCEV isn't representable as a Constant.
  5048. static Constant *BuildConstantFromSCEV(const SCEV *V) {
  5049. switch (static_cast<SCEVTypes>(V->getSCEVType())) {
  5050. case scCouldNotCompute:
  5051. case scAddRecExpr:
  5052. break;
  5053. case scConstant:
  5054. return cast<SCEVConstant>(V)->getValue();
  5055. case scUnknown:
  5056. return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  5057. case scSignExtend: {
  5058. const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
  5059. if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
  5060. return ConstantExpr::getSExt(CastOp, SS->getType());
  5061. break;
  5062. }
  5063. case scZeroExtend: {
  5064. const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
  5065. if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
  5066. return ConstantExpr::getZExt(CastOp, SZ->getType());
  5067. break;
  5068. }
  5069. case scTruncate: {
  5070. const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
  5071. if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
  5072. return ConstantExpr::getTrunc(CastOp, ST->getType());
  5073. break;
  5074. }
  5075. case scAddExpr: {
  5076. const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
  5077. if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
  5078. if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
  5079. unsigned AS = PTy->getAddressSpace();
  5080. Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
  5081. C = ConstantExpr::getBitCast(C, DestPtrTy);
  5082. }
  5083. for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
  5084. Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
  5085. if (!C2) return nullptr;
  5086. // First pointer!
  5087. if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
  5088. unsigned AS = C2->getType()->getPointerAddressSpace();
  5089. std::swap(C, C2);
  5090. Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
  5091. // The offsets have been converted to bytes. We can add bytes to an
  5092. // i8* by GEP with the byte count in the first index.
  5093. C = ConstantExpr::getBitCast(C, DestPtrTy);
  5094. }
  5095. // Don't bother trying to sum two pointers. We probably can't
  5096. // statically compute a load that results from it anyway.
  5097. if (C2->getType()->isPointerTy())
  5098. return nullptr;
  5099. if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
  5100. if (PTy->getElementType()->isStructTy())
  5101. C2 = ConstantExpr::getIntegerCast(
  5102. C2, Type::getInt32Ty(C->getContext()), true);
  5103. C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
  5104. } else
  5105. C = ConstantExpr::getAdd(C, C2);
  5106. }
  5107. return C;
  5108. }
  5109. break;
  5110. }
  5111. case scMulExpr: {
  5112. const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
  5113. if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
  5114. // Don't bother with pointers at all.
  5115. if (C->getType()->isPointerTy()) return nullptr;
  5116. for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
  5117. Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
  5118. if (!C2 || C2->getType()->isPointerTy()) return nullptr;
  5119. C = ConstantExpr::getMul(C, C2);
  5120. }
  5121. return C;
  5122. }
  5123. break;
  5124. }
  5125. case scUDivExpr: {
  5126. const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
  5127. if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
  5128. if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
  5129. if (LHS->getType() == RHS->getType())
  5130. return ConstantExpr::getUDiv(LHS, RHS);
  5131. break;
  5132. }
  5133. case scSMaxExpr:
  5134. case scUMaxExpr:
  5135. break; // TODO: smax, umax.
  5136. }
  5137. return nullptr;
  5138. }
  5139. const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  5140. if (isa<SCEVConstant>(V)) return V;
  5141. // If this instruction is evolved from a constant-evolving PHI, compute the
  5142. // exit value from the loop without using SCEVs.
  5143. if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
  5144. if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
  5145. const Loop *LI = (*this->LI)[I->getParent()];
  5146. if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
  5147. if (PHINode *PN = dyn_cast<PHINode>(I))
  5148. if (PN->getParent() == LI->getHeader()) {
  5149. // Okay, there is no closed form solution for the PHI node. Check
  5150. // to see if the loop that contains it has a known backedge-taken
  5151. // count. If so, we may be able to force computation of the exit
  5152. // value.
  5153. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
  5154. if (const SCEVConstant *BTCC =
  5155. dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
  5156. // Okay, we know how many times the containing loop executes. If
  5157. // this is a constant evolving PHI node, get the final value at
  5158. // the specified iteration number.
  5159. Constant *RV = getConstantEvolutionLoopExitValue(PN,
  5160. BTCC->getValue()->getValue(),
  5161. LI);
  5162. if (RV) return getSCEV(RV);
  5163. }
  5164. }
  5165. // Okay, this is an expression that we cannot symbolically evaluate
  5166. // into a SCEV. Check to see if it's possible to symbolically evaluate
  5167. // the arguments into constants, and if so, try to constant propagate the
  5168. // result. This is particularly useful for computing loop exit values.
  5169. if (CanConstantFold(I)) {
  5170. SmallVector<Constant *, 4> Operands;
  5171. bool MadeImprovement = false;
  5172. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
  5173. Value *Op = I->getOperand(i);
  5174. if (Constant *C = dyn_cast<Constant>(Op)) {
  5175. Operands.push_back(C);
  5176. continue;
  5177. }
  5178. // If any of the operands is non-constant and if they are
  5179. // non-integer and non-pointer, don't even try to analyze them
  5180. // with scev techniques.
  5181. if (!isSCEVable(Op->getType()))
  5182. return V;
  5183. const SCEV *OrigV = getSCEV(Op);
  5184. const SCEV *OpV = getSCEVAtScope(OrigV, L);
  5185. MadeImprovement |= OrigV != OpV;
  5186. Constant *C = BuildConstantFromSCEV(OpV);
  5187. if (!C) return V;
  5188. if (C->getType() != Op->getType())
  5189. C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
  5190. Op->getType(),
  5191. false),
  5192. C, Op->getType());
  5193. Operands.push_back(C);
  5194. }
  5195. // Check to see if getSCEVAtScope actually made an improvement.
  5196. if (MadeImprovement) {
  5197. Constant *C = nullptr;
  5198. const DataLayout &DL = F->getParent()->getDataLayout();
  5199. if (const CmpInst *CI = dyn_cast<CmpInst>(I))
  5200. C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
  5201. Operands[1], DL, TLI);
  5202. else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
  5203. if (!LI->isVolatile())
  5204. C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
  5205. } else
  5206. C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands,
  5207. DL, TLI);
  5208. if (!C) return V;
  5209. return getSCEV(C);
  5210. }
  5211. }
  5212. }
  5213. // This is some other type of SCEVUnknown, just return it.
  5214. return V;
  5215. }
  5216. if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
  5217. // Avoid performing the look-up in the common case where the specified
  5218. // expression has no loop-variant portions.
  5219. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
  5220. const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
  5221. if (OpAtScope != Comm->getOperand(i)) {
  5222. // Okay, at least one of these operands is loop variant but might be
  5223. // foldable. Build a new instance of the folded commutative expression.
  5224. SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
  5225. Comm->op_begin()+i);
  5226. NewOps.push_back(OpAtScope);
  5227. for (++i; i != e; ++i) {
  5228. OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
  5229. NewOps.push_back(OpAtScope);
  5230. }
  5231. if (isa<SCEVAddExpr>(Comm))
  5232. return getAddExpr(NewOps);
  5233. if (isa<SCEVMulExpr>(Comm))
  5234. return getMulExpr(NewOps);
  5235. if (isa<SCEVSMaxExpr>(Comm))
  5236. return getSMaxExpr(NewOps);
  5237. if (isa<SCEVUMaxExpr>(Comm))
  5238. return getUMaxExpr(NewOps);
  5239. llvm_unreachable("Unknown commutative SCEV type!");
  5240. }
  5241. }
  5242. // If we got here, all operands are loop invariant.
  5243. return Comm;
  5244. }
  5245. if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
  5246. const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
  5247. const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
  5248. if (LHS == Div->getLHS() && RHS == Div->getRHS())
  5249. return Div; // must be loop invariant
  5250. return getUDivExpr(LHS, RHS);
  5251. }
  5252. // If this is a loop recurrence for a loop that does not contain L, then we
  5253. // are dealing with the final value computed by the loop.
  5254. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
  5255. // First, attempt to evaluate each operand.
  5256. // Avoid performing the look-up in the common case where the specified
  5257. // expression has no loop-variant portions.
  5258. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
  5259. const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
  5260. if (OpAtScope == AddRec->getOperand(i))
  5261. continue;
  5262. // Okay, at least one of these operands is loop variant but might be
  5263. // foldable. Build a new instance of the folded commutative expression.
  5264. SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
  5265. AddRec->op_begin()+i);
  5266. NewOps.push_back(OpAtScope);
  5267. for (++i; i != e; ++i)
  5268. NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
  5269. const SCEV *FoldedRec =
  5270. getAddRecExpr(NewOps, AddRec->getLoop(),
  5271. AddRec->getNoWrapFlags(SCEV::FlagNW));
  5272. AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
  5273. // The addrec may be folded to a nonrecurrence, for example, if the
  5274. // induction variable is multiplied by zero after constant folding. Go
  5275. // ahead and return the folded value.
  5276. if (!AddRec)
  5277. return FoldedRec;
  5278. break;
  5279. }
  5280. // If the scope is outside the addrec's loop, evaluate it by using the
  5281. // loop exit value of the addrec.
  5282. if (!AddRec->getLoop()->contains(L)) {
  5283. // To evaluate this recurrence, we need to know how many times the AddRec
  5284. // loop iterates. Compute this now.
  5285. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
  5286. if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
  5287. // Then, evaluate the AddRec.
  5288. return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
  5289. }
  5290. return AddRec;
  5291. }
  5292. if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
  5293. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  5294. if (Op == Cast->getOperand())
  5295. return Cast; // must be loop invariant
  5296. return getZeroExtendExpr(Op, Cast->getType());
  5297. }
  5298. if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
  5299. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  5300. if (Op == Cast->getOperand())
  5301. return Cast; // must be loop invariant
  5302. return getSignExtendExpr(Op, Cast->getType());
  5303. }
  5304. if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
  5305. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  5306. if (Op == Cast->getOperand())
  5307. return Cast; // must be loop invariant
  5308. return getTruncateExpr(Op, Cast->getType());
  5309. }
  5310. llvm_unreachable("Unknown SCEV type!");
  5311. }
  5312. /// getSCEVAtScope - This is a convenience function which does
  5313. /// getSCEVAtScope(getSCEV(V), L).
  5314. const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  5315. return getSCEVAtScope(getSCEV(V), L);
  5316. }
  5317. /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
  5318. /// following equation:
  5319. ///
  5320. /// A * X = B (mod N)
  5321. ///
  5322. /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
  5323. /// A and B isn't important.
  5324. ///
  5325. /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
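///
/// For illustration (a worked example, not part of the algorithm): with
/// BW = 8, A = 6 and B = 4 (so N = 256): D = gcd(6, 256) = 2 and B is
/// divisible by D; A/D = 3, N/D = 128, and the multiplicative inverse of 3
/// modulo 128 is 43, so X = 43 * (4/2) mod 128 = 86. Check: 6 * 86 = 516 =
/// 2 * 256 + 4, i.e. 6 * X == B (mod N), and 86 is the smaller of the two
/// solutions {86, 214}.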
  5326. static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
  5327. ScalarEvolution &SE) {
  5328. uint32_t BW = A.getBitWidth();
  5329. assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  5330. assert(A != 0 && "A must be non-zero.");
  5331. // 1. D = gcd(A, N)
  5332. //
  5333. // The gcd of A and N may have only one prime factor: 2. The number of
5334. // trailing zeros in A is its multiplicity.
  5335. uint32_t Mult2 = A.countTrailingZeros();
  5336. // D = 2^Mult2
  5337. // 2. Check if B is divisible by D.
  5338. //
  5339. // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  5340. // is not less than multiplicity of this prime factor for D.
  5341. if (B.countTrailingZeros() < Mult2)
  5342. return SE.getCouldNotCompute();
  5343. // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  5344. // modulo (N / D).
  5345. //
  5346. // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
  5347. // bit width during computations.
  5348. APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  5349. APInt Mod(BW + 1, 0);
  5350. Mod.setBit(BW - Mult2); // Mod = N / D
  5351. APInt I = AD.multiplicativeInverse(Mod);
  5352. // 4. Compute the minimum unsigned root of the equation:
  5353. // I * (B / D) mod (N / D)
  5354. APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
  5355. // The result is guaranteed to be less than 2^BW so we may truncate it to BW
  5356. // bits.
  5357. return SE.getConstant(Result.trunc(BW));
  5358. }
/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
/// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
/// might be the same) or two SCEVCouldNotCompute objects.
///
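/// The conversion used below relies on the closed form of the chrec: its value
/// at iteration x is L + M*x + N*(x*(x-1)/2), i.e. (N/2)*x^2 + (M - N/2)*x + L,
/// which is where the A = N/2, B = M - N/2 and C = L coefficients in the body
/// come from.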
  5363. static std::pair<const SCEV *,const SCEV *>
  5364. SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  5365. assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  5366. const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  5367. const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  5368. const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  5369. // We currently can only solve this if the coefficients are constants.
  5370. if (!LC || !MC || !NC) {
  5371. const SCEV *CNC = SE.getCouldNotCompute();
  5372. return std::make_pair(CNC, CNC);
  5373. }
  5374. uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  5375. const APInt &L = LC->getValue()->getValue();
  5376. const APInt &M = MC->getValue()->getValue();
  5377. const APInt &N = NC->getValue()->getValue();
  5378. APInt Two(BitWidth, 2);
  5379. APInt Four(BitWidth, 4);
  5380. {
  5381. using namespace APIntOps;
  5382. const APInt& C = L;
  5383. // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
  5384. // The B coefficient is M-N/2
  5385. APInt B(M);
  5386. B -= sdiv(N,Two);
  5387. // The A coefficient is N/2
  5388. APInt A(N.sdiv(Two));
  5389. // Compute the B^2-4ac term.
  5390. APInt SqrtTerm(B);
  5391. SqrtTerm *= B;
  5392. SqrtTerm -= Four * (A * C);
  5393. if (SqrtTerm.isNegative()) {
  5394. // The loop is provably infinite.
  5395. const SCEV *CNC = SE.getCouldNotCompute();
  5396. return std::make_pair(CNC, CNC);
  5397. }
  5398. // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
  5399. // integer value or else APInt::sqrt() will assert.
  5400. APInt SqrtVal(SqrtTerm.sqrt());
  5401. // Compute the two solutions for the quadratic formula.
  5402. // The divisions must be performed as signed divisions.
  5403. APInt NegB(-B);
  5404. APInt TwoA(A << 1);
  5405. if (TwoA.isMinValue()) {
  5406. const SCEV *CNC = SE.getCouldNotCompute();
  5407. return std::make_pair(CNC, CNC);
  5408. }
  5409. LLVMContext &Context = SE.getContext();
  5410. ConstantInt *Solution1 =
  5411. ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  5412. ConstantInt *Solution2 =
  5413. ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
  5414. return std::make_pair(SE.getConstant(Solution1),
  5415. SE.getConstant(Solution2));
} // end of scope using APIntOps
  5417. }
/// HowFarToZero - Return the number of times a backedge comparing the specified
/// value to zero will execute. If not computable, return CouldNotCompute.
///
/// This is only used for loops with a "x != y" exit test. The exit condition is
/// now expressed as a single expression, V = x-y. So the exit test is
/// effectively V != 0. We know and take advantage of the fact that this
/// expression is only used in a comparison-with-zero context.
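///
/// For example, for the affine recurrence {10,+,-2} the equation is
/// -2*N = -10 (mod 2^BW), whose minimum unsigned root is N = 5, so the
/// backedge executes five times. The same machinery also handles strides that
/// only reach zero by wrapping modulo 2^BW.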
  5425. ScalarEvolution::ExitLimit
  5426. ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool ControlsExit) {
  5427. // If the value is a constant
  5428. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
  5429. // If the value is already zero, the branch will execute zero times.
  5430. if (C->getValue()->isZero()) return C;
  5431. return getCouldNotCompute(); // Otherwise it will loop infinitely.
  5432. }
  5433. const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  5434. if (!AddRec || AddRec->getLoop() != L)
  5435. return getCouldNotCompute();
  5436. // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  5437. // the quadratic equation to solve it.
  5438. if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
  5439. std::pair<const SCEV *,const SCEV *> Roots =
  5440. SolveQuadraticEquation(AddRec, *this);
  5441. const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
  5442. const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
  5443. if (R1 && R2) {
  5444. #if 0
  5445. dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
  5446. << " sol#2: " << *R2 << "\n";
  5447. #endif
  5448. // Pick the smallest positive root value.
  5449. if (ConstantInt *CB =
  5450. dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
  5451. R1->getValue(),
  5452. R2->getValue()))) {
  5453. if (!CB->getZExtValue())
  5454. std::swap(R1, R2); // R1 is the minimum root now.
  5455. // We can only use this value if the chrec ends up with an exact zero
  5456. // value at this index. When solving for "X*X != 5", for example, we
  5457. // should not accept a root of 2.
  5458. const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
  5459. if (Val->isZero())
  5460. return R1; // We found a quadratic root!
  5461. }
  5462. }
  5463. return getCouldNotCompute();
  5464. }
  5465. // Otherwise we can only handle this if it is affine.
  5466. if (!AddRec->isAffine())
  5467. return getCouldNotCompute();
  5468. // If this is an affine expression, the execution count of this branch is
  5469. // the minimum unsigned root of the following equation:
  5470. //
  5471. // Start + Step*N = 0 (mod 2^BW)
  5472. //
  5473. // equivalent to:
  5474. //
  5475. // Step*N = -Start (mod 2^BW)
  5476. //
  5477. // where BW is the common bit width of Start and Step.
  5478. // Get the initial value for the loop.
  5479. const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  5480. const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
  5481. // For now we handle only constant steps.
  5482. //
  5483. // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  5484. // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  5485. // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  5486. // We have not yet seen any such cases.
  5487. const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  5488. if (!StepC || StepC->getValue()->equalsInt(0))
  5489. return getCouldNotCompute();
  5490. // For positive steps (counting up until unsigned overflow):
  5491. // N = -Start/Step (as unsigned)
  5492. // For negative steps (counting down to zero):
  5493. // N = Start/-Step
  5494. // First compute the unsigned distance from zero in the direction of Step.
  5495. bool CountDown = StepC->getValue()->getValue().isNegative();
  5496. const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
  5497. // Handle unitary steps, which cannot wraparound.
  5498. // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  5499. // N = Distance (as unsigned)
  5500. if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
  5501. ConstantRange CR = getUnsignedRange(Start);
  5502. const SCEV *MaxBECount;
  5503. if (!CountDown && CR.getUnsignedMin().isMinValue())
  5504. // When counting up, the worst starting value is 1, not 0.
  5505. MaxBECount = CR.getUnsignedMax().isMinValue()
  5506. ? getConstant(APInt::getMinValue(CR.getBitWidth()))
  5507. : getConstant(APInt::getMaxValue(CR.getBitWidth()));
  5508. else
  5509. MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
  5510. : -CR.getUnsignedMin());
  5511. return ExitLimit(Distance, MaxBECount);
  5512. }
  5513. // As a special case, handle the instance where Step is a positive power of
  5514. // two. In this case, determining whether Step divides Distance evenly can be
  5515. // done by counting and comparing the number of trailing zeros of Step and
  5516. // Distance.
  5517. if (!CountDown) {
  5518. const APInt &StepV = StepC->getValue()->getValue();
// StepV.isPowerOf2() returns true if StepV is a positive power of two. It
// also returns true if StepV is maximally negative (e.g., INT_MIN), but that
// case is not handled as this code is guarded by !CountDown.
  5522. if (StepV.isPowerOf2() &&
  5523. GetMinTrailingZeros(Distance) >= StepV.countTrailingZeros())
  5524. return getUDivExactExpr(Distance, Step);
  5525. }
  5526. // If the condition controls loop exit (the loop exits only if the expression
  5527. // is true) and the addition is no-wrap we can use unsigned divide to
  5528. // compute the backedge count. In this case, the step may not divide the
  5529. // distance, but we don't care because if the condition is "missed" the loop
  5530. // will have undefined behavior due to wrapping.
  5531. if (ControlsExit && AddRec->getNoWrapFlags(SCEV::FlagNW)) {
  5532. const SCEV *Exact =
  5533. getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
  5534. return ExitLimit(Exact, Exact);
  5535. }
  5536. // Then, try to solve the above equation provided that Start is constant.
  5537. if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
  5538. return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
  5539. -StartC->getValue()->getValue(),
  5540. *this);
  5541. return getCouldNotCompute();
  5542. }
/// HowFarToNonZero - Return the number of times a backedge checking the
/// specified value for nonzero will execute. If not computable, return
/// CouldNotCompute.
  5546. ScalarEvolution::ExitLimit
  5547. ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
  5548. // Loops that look like: while (X == 0) are very strange indeed. We don't
  5549. // handle them yet except for the trivial case. This could be expanded in the
  5550. // future as needed.
  5551. // If the value is a constant, check to see if it is known to be non-zero
  5552. // already. If so, the backedge will execute zero times.
  5553. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
  5554. if (!C->getValue()->isNullValue())
  5555. return getConstant(C->getType(), 0);
  5556. return getCouldNotCompute(); // Otherwise it will loop infinitely.
  5557. }
  5558. // We could implement others, but I really doubt anyone writes loops like
  5559. // this, and if they did, they would already be constant folded.
  5560. return getCouldNotCompute();
  5561. }
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, paired with that successor; if no
/// such block is found, return a pair of null pointers.
///
  5567. std::pair<BasicBlock *, BasicBlock *>
  5568. ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  5569. // If the block has a unique predecessor, then there is no path from the
  5570. // predecessor to the block that does not go through the direct edge
  5571. // from the predecessor to the block.
  5572. if (BasicBlock *Pred = BB->getSinglePredecessor())
  5573. return std::make_pair(Pred, BB);
  5574. // A loop's header is defined to be a block that dominates the loop.
  5575. // If the header has a unique predecessor outside the loop, it must be
  5576. // a block that has exactly one successor that can reach the loop.
  5577. if (Loop *L = LI->getLoopFor(BB))
  5578. return std::make_pair(L->getLoopPredecessor(), L->getHeader());
  5579. return std::pair<BasicBlock *, BasicBlock *>();
  5580. }
  5581. /// HasSameValue - SCEV structural equivalence is usually sufficient for
  5582. /// testing whether two expressions are equal, however for the purposes of
  5583. /// looking for a condition guarding a loop, it can be useful to be a little
  5584. /// more general, since a front-end may have replicated the controlling
  5585. /// expression.
  5586. ///
  5587. static bool HasSameValue(const SCEV *A, const SCEV *B) {
  5588. // Quick check to see if they are the same SCEV.
  5589. if (A == B) return true;
  5590. // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  5591. // two different instructions with the same value. Check for this case.
  5592. if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
  5593. if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
  5594. if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
  5595. if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
  5596. if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
  5597. return true;
  5598. // Otherwise assume they may have a different value.
  5599. return false;
  5600. }
  5601. /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
  5602. /// predicate Pred. Return true iff any changes were made.
  5603. ///
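/// For example, (X u<= 5) is canonicalized to (X u< 6), (X s>= 1) to (X s> 0),
/// and a comparison of two constants folds to the trivial (0 == 0) or (0 != 0)
/// form.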
  5604. bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
  5605. const SCEV *&LHS, const SCEV *&RHS,
  5606. unsigned Depth) {
  5607. bool Changed = false;
  5608. // If we hit the max recursion limit bail out.
  5609. if (Depth >= 3)
  5610. return false;
  5611. // Canonicalize a constant to the right side.
  5612. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
  5613. // Check for both operands constant.
  5614. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
  5615. if (ConstantExpr::getICmp(Pred,
  5616. LHSC->getValue(),
  5617. RHSC->getValue())->isNullValue())
  5618. goto trivially_false;
  5619. else
  5620. goto trivially_true;
  5621. }
  5622. // Otherwise swap the operands to put the constant on the right.
  5623. std::swap(LHS, RHS);
  5624. Pred = ICmpInst::getSwappedPredicate(Pred);
  5625. Changed = true;
  5626. }
  5627. // If we're comparing an addrec with a value which is loop-invariant in the
  5628. // addrec's loop, put the addrec on the left. Also make a dominance check,
  5629. // as both operands could be addrecs loop-invariant in each other's loop.
  5630. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
  5631. const Loop *L = AR->getLoop();
  5632. if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
  5633. std::swap(LHS, RHS);
  5634. Pred = ICmpInst::getSwappedPredicate(Pred);
  5635. Changed = true;
  5636. }
  5637. }
  5638. // If there's a constant operand, canonicalize comparisons with boundary
  5639. // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  5640. if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
  5641. const APInt &RA = RC->getValue()->getValue();
  5642. switch (Pred) {
  5643. default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  5644. case ICmpInst::ICMP_EQ:
  5645. case ICmpInst::ICMP_NE:
  5646. // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
  5647. if (!RA)
  5648. if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
  5649. if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
  5650. if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
  5651. ME->getOperand(0)->isAllOnesValue()) {
  5652. RHS = AE->getOperand(1);
  5653. LHS = ME->getOperand(1);
  5654. Changed = true;
  5655. }
  5656. break;
  5657. case ICmpInst::ICMP_UGE:
  5658. if ((RA - 1).isMinValue()) {
  5659. Pred = ICmpInst::ICMP_NE;
  5660. RHS = getConstant(RA - 1);
  5661. Changed = true;
  5662. break;
  5663. }
  5664. if (RA.isMaxValue()) {
  5665. Pred = ICmpInst::ICMP_EQ;
  5666. Changed = true;
  5667. break;
  5668. }
  5669. if (RA.isMinValue()) goto trivially_true;
  5670. Pred = ICmpInst::ICMP_UGT;
  5671. RHS = getConstant(RA - 1);
  5672. Changed = true;
  5673. break;
  5674. case ICmpInst::ICMP_ULE:
  5675. if ((RA + 1).isMaxValue()) {
  5676. Pred = ICmpInst::ICMP_NE;
  5677. RHS = getConstant(RA + 1);
  5678. Changed = true;
  5679. break;
  5680. }
  5681. if (RA.isMinValue()) {
  5682. Pred = ICmpInst::ICMP_EQ;
  5683. Changed = true;
  5684. break;
  5685. }
  5686. if (RA.isMaxValue()) goto trivially_true;
  5687. Pred = ICmpInst::ICMP_ULT;
  5688. RHS = getConstant(RA + 1);
  5689. Changed = true;
  5690. break;
  5691. case ICmpInst::ICMP_SGE:
  5692. if ((RA - 1).isMinSignedValue()) {
  5693. Pred = ICmpInst::ICMP_NE;
  5694. RHS = getConstant(RA - 1);
  5695. Changed = true;
  5696. break;
  5697. }
  5698. if (RA.isMaxSignedValue()) {
  5699. Pred = ICmpInst::ICMP_EQ;
  5700. Changed = true;
  5701. break;
  5702. }
  5703. if (RA.isMinSignedValue()) goto trivially_true;
  5704. Pred = ICmpInst::ICMP_SGT;
  5705. RHS = getConstant(RA - 1);
  5706. Changed = true;
  5707. break;
  5708. case ICmpInst::ICMP_SLE:
  5709. if ((RA + 1).isMaxSignedValue()) {
  5710. Pred = ICmpInst::ICMP_NE;
  5711. RHS = getConstant(RA + 1);
  5712. Changed = true;
  5713. break;
  5714. }
  5715. if (RA.isMinSignedValue()) {
  5716. Pred = ICmpInst::ICMP_EQ;
  5717. Changed = true;
  5718. break;
  5719. }
  5720. if (RA.isMaxSignedValue()) goto trivially_true;
  5721. Pred = ICmpInst::ICMP_SLT;
  5722. RHS = getConstant(RA + 1);
  5723. Changed = true;
  5724. break;
  5725. case ICmpInst::ICMP_UGT:
  5726. if (RA.isMinValue()) {
  5727. Pred = ICmpInst::ICMP_NE;
  5728. Changed = true;
  5729. break;
  5730. }
  5731. if ((RA + 1).isMaxValue()) {
  5732. Pred = ICmpInst::ICMP_EQ;
  5733. RHS = getConstant(RA + 1);
  5734. Changed = true;
  5735. break;
  5736. }
  5737. if (RA.isMaxValue()) goto trivially_false;
  5738. break;
  5739. case ICmpInst::ICMP_ULT:
  5740. if (RA.isMaxValue()) {
  5741. Pred = ICmpInst::ICMP_NE;
  5742. Changed = true;
  5743. break;
  5744. }
  5745. if ((RA - 1).isMinValue()) {
  5746. Pred = ICmpInst::ICMP_EQ;
  5747. RHS = getConstant(RA - 1);
  5748. Changed = true;
  5749. break;
  5750. }
  5751. if (RA.isMinValue()) goto trivially_false;
  5752. break;
  5753. case ICmpInst::ICMP_SGT:
  5754. if (RA.isMinSignedValue()) {
  5755. Pred = ICmpInst::ICMP_NE;
  5756. Changed = true;
  5757. break;
  5758. }
  5759. if ((RA + 1).isMaxSignedValue()) {
  5760. Pred = ICmpInst::ICMP_EQ;
  5761. RHS = getConstant(RA + 1);
  5762. Changed = true;
  5763. break;
  5764. }
  5765. if (RA.isMaxSignedValue()) goto trivially_false;
  5766. break;
  5767. case ICmpInst::ICMP_SLT:
  5768. if (RA.isMaxSignedValue()) {
  5769. Pred = ICmpInst::ICMP_NE;
  5770. Changed = true;
  5771. break;
  5772. }
  5773. if ((RA - 1).isMinSignedValue()) {
  5774. Pred = ICmpInst::ICMP_EQ;
  5775. RHS = getConstant(RA - 1);
  5776. Changed = true;
  5777. break;
  5778. }
  5779. if (RA.isMinSignedValue()) goto trivially_false;
  5780. break;
  5781. }
  5782. }
  5783. // Check for obvious equality.
  5784. if (HasSameValue(LHS, RHS)) {
  5785. if (ICmpInst::isTrueWhenEqual(Pred))
  5786. goto trivially_true;
  5787. if (ICmpInst::isFalseWhenEqual(Pred))
  5788. goto trivially_false;
  5789. }
  5790. // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  5791. // adding or subtracting 1 from one of the operands.
  5792. switch (Pred) {
  5793. case ICmpInst::ICMP_SLE:
  5794. if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
  5795. RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
  5796. SCEV::FlagNSW);
  5797. Pred = ICmpInst::ICMP_SLT;
  5798. Changed = true;
  5799. } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
  5800. LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
  5801. SCEV::FlagNSW);
  5802. Pred = ICmpInst::ICMP_SLT;
  5803. Changed = true;
  5804. }
  5805. break;
  5806. case ICmpInst::ICMP_SGE:
  5807. if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
  5808. RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
  5809. SCEV::FlagNSW);
  5810. Pred = ICmpInst::ICMP_SGT;
  5811. Changed = true;
  5812. } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
  5813. LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
  5814. SCEV::FlagNSW);
  5815. Pred = ICmpInst::ICMP_SGT;
  5816. Changed = true;
  5817. }
  5818. break;
  5819. case ICmpInst::ICMP_ULE:
  5820. if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
  5821. RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
  5822. SCEV::FlagNUW);
  5823. Pred = ICmpInst::ICMP_ULT;
  5824. Changed = true;
  5825. } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
  5826. LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
  5827. SCEV::FlagNUW);
  5828. Pred = ICmpInst::ICMP_ULT;
  5829. Changed = true;
  5830. }
  5831. break;
  5832. case ICmpInst::ICMP_UGE:
  5833. if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
  5834. RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
  5835. SCEV::FlagNUW);
  5836. Pred = ICmpInst::ICMP_UGT;
  5837. Changed = true;
  5838. } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
  5839. LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
  5840. SCEV::FlagNUW);
  5841. Pred = ICmpInst::ICMP_UGT;
  5842. Changed = true;
  5843. }
  5844. break;
  5845. default:
  5846. break;
  5847. }
  5848. // TODO: More simplifications are possible here.
  5849. // Recursively simplify until we either hit a recursion limit or nothing
  5850. // changes.
  5851. if (Changed)
  5852. return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
  5853. return Changed;
  5854. trivially_true:
  5855. // Return 0 == 0.
  5856. LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  5857. Pred = ICmpInst::ICMP_EQ;
  5858. return true;
  5859. trivially_false:
  5860. // Return 0 != 0.
  5861. LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  5862. Pred = ICmpInst::ICMP_NE;
  5863. return true;
  5864. }
  5865. bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  5866. return getSignedRange(S).getSignedMax().isNegative();
  5867. }
  5868. bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  5869. return getSignedRange(S).getSignedMin().isStrictlyPositive();
  5870. }
  5871. bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  5872. return !getSignedRange(S).getSignedMin().isNegative();
  5873. }
  5874. bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  5875. return !getSignedRange(S).getSignedMax().isStrictlyPositive();
  5876. }
  5877. bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  5878. return isKnownNegative(S) || isKnownPositive(S);
  5879. }
  5880. bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
  5881. const SCEV *LHS, const SCEV *RHS) {
  5882. // Canonicalize the inputs first.
  5883. (void)SimplifyICmpOperands(Pred, LHS, RHS);
  5884. // If LHS or RHS is an addrec, check to see if the condition is true in
  5885. // every iteration of the loop.
  5886. // If LHS and RHS are both addrec, both conditions must be true in
  5887. // every iteration of the loop.
  5888. const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  5889. const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  5890. bool LeftGuarded = false;
  5891. bool RightGuarded = false;
  5892. if (LAR) {
  5893. const Loop *L = LAR->getLoop();
  5894. if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
  5895. isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
  5896. if (!RAR) return true;
  5897. LeftGuarded = true;
  5898. }
  5899. }
  5900. if (RAR) {
  5901. const Loop *L = RAR->getLoop();
  5902. if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
  5903. isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
  5904. if (!LAR) return true;
  5905. RightGuarded = true;
  5906. }
  5907. }
  5908. if (LeftGuarded && RightGuarded)
  5909. return true;
  5910. // Otherwise see what can be done with known constant ranges.
  5911. return isKnownPredicateWithRanges(Pred, LHS, RHS);
  5912. }
  5913. bool
  5914. ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
  5915. const SCEV *LHS, const SCEV *RHS) {
  5916. if (HasSameValue(LHS, RHS))
  5917. return ICmpInst::isTrueWhenEqual(Pred);
  5918. // This code is split out from isKnownPredicate because it is called from
  5919. // within isLoopEntryGuardedByCond.
  5920. switch (Pred) {
  5921. default:
  5922. llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  5923. case ICmpInst::ICMP_SGT:
  5924. std::swap(LHS, RHS);
  5925. case ICmpInst::ICMP_SLT: {
  5926. ConstantRange LHSRange = getSignedRange(LHS);
  5927. ConstantRange RHSRange = getSignedRange(RHS);
  5928. if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
  5929. return true;
  5930. if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
  5931. return false;
  5932. break;
  5933. }
  5934. case ICmpInst::ICMP_SGE:
  5935. std::swap(LHS, RHS);
  5936. case ICmpInst::ICMP_SLE: {
  5937. ConstantRange LHSRange = getSignedRange(LHS);
  5938. ConstantRange RHSRange = getSignedRange(RHS);
  5939. if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
  5940. return true;
  5941. if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
  5942. return false;
  5943. break;
  5944. }
  5945. case ICmpInst::ICMP_UGT:
  5946. std::swap(LHS, RHS);
  5947. case ICmpInst::ICMP_ULT: {
  5948. ConstantRange LHSRange = getUnsignedRange(LHS);
  5949. ConstantRange RHSRange = getUnsignedRange(RHS);
  5950. if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
  5951. return true;
  5952. if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
  5953. return false;
  5954. break;
  5955. }
  5956. case ICmpInst::ICMP_UGE:
  5957. std::swap(LHS, RHS);
  5958. case ICmpInst::ICMP_ULE: {
  5959. ConstantRange LHSRange = getUnsignedRange(LHS);
  5960. ConstantRange RHSRange = getUnsignedRange(RHS);
  5961. if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
  5962. return true;
  5963. if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
  5964. return false;
  5965. break;
  5966. }
  5967. case ICmpInst::ICMP_NE: {
  5968. if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
  5969. return true;
  5970. if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
  5971. return true;
  5972. const SCEV *Diff = getMinusSCEV(LHS, RHS);
  5973. if (isKnownNonZero(Diff))
  5974. return true;
  5975. break;
  5976. }
  5977. case ICmpInst::ICMP_EQ:
  5978. // The check at the top of the function catches the case where
  5979. // the values are known to be equal.
  5980. break;
  5981. }
  5982. return false;
  5983. }
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
  5987. bool
  5988. ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
  5989. ICmpInst::Predicate Pred,
  5990. const SCEV *LHS, const SCEV *RHS) {
  5991. // Interpret a null as meaning no loop, where there is obviously no guard
  5992. // (interprocedural conditions notwithstanding).
  5993. if (!L) return true;
  5994. if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true;
  5995. BasicBlock *Latch = L->getLoopLatch();
  5996. if (!Latch)
  5997. return false;
  5998. BranchInst *LoopContinuePredicate =
  5999. dyn_cast<BranchInst>(Latch->getTerminator());
  6000. if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
  6001. isImpliedCond(Pred, LHS, RHS,
  6002. LoopContinuePredicate->getCondition(),
  6003. LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
  6004. return true;
  6005. // Check conditions due to any @llvm.assume intrinsics.
  6006. for (auto &AssumeVH : AC->assumptions()) {
  6007. if (!AssumeVH)
  6008. continue;
  6009. auto *CI = cast<CallInst>(AssumeVH);
  6010. if (!DT->dominates(CI, Latch->getTerminator()))
  6011. continue;
  6012. if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
  6013. return true;
  6014. }
  6015. struct ClearWalkingBEDominatingCondsOnExit {
  6016. ScalarEvolution &SE;
  6017. explicit ClearWalkingBEDominatingCondsOnExit(ScalarEvolution &SE)
: SE(SE) {}
  6019. ~ClearWalkingBEDominatingCondsOnExit() {
  6020. SE.WalkingBEDominatingConds = false;
  6021. }
  6022. };
  6023. // We don't want more than one activation of the following loop on the stack
  6024. // -- that can lead to O(n!) time complexity.
  6025. if (WalkingBEDominatingConds)
  6026. return false;
  6027. WalkingBEDominatingConds = true;
  6028. ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this);
  6029. // If the loop is not reachable from the entry block, we risk running into an
  6030. // infinite loop as we walk up into the dom tree. These loops do not matter
  6031. // anyway, so we just return a conservative answer when we see them.
  6032. if (!DT->isReachableFromEntry(L->getHeader()))
  6033. return false;
  6034. for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()];
  6035. DTN != HeaderDTN;
  6036. DTN = DTN->getIDom()) {
  6037. assert(DTN && "should reach the loop header before reaching the root!");
  6038. BasicBlock *BB = DTN->getBlock();
  6039. BasicBlock *PBB = BB->getSinglePredecessor();
  6040. if (!PBB)
  6041. continue;
  6042. BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
  6043. if (!ContinuePredicate || !ContinuePredicate->isConditional())
  6044. continue;
  6045. Value *Condition = ContinuePredicate->getCondition();
  6046. // If we have an edge `E` within the loop body that dominates the only
  6047. // latch, the condition guarding `E` also guards the backedge. This
  6048. // reasoning works only for loops with a single latch.
  6049. BasicBlockEdge DominatingEdge(PBB, BB);
  6050. if (DominatingEdge.isSingleEdge()) {
  6051. // We're constructively (and conservatively) enumerating edges within the
  6052. // loop body that dominate the latch. The dominator tree better agree
  6053. // with us on this:
  6054. assert(DT->dominates(DominatingEdge, Latch) && "should be!");
  6055. if (isImpliedCond(Pred, LHS, RHS, Condition,
  6056. BB != ContinuePredicate->getSuccessor(0)))
  6057. return true;
  6058. }
  6059. }
  6060. return false;
  6061. }
  6062. /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
  6063. /// by a conditional between LHS and RHS. This is used to help avoid max
  6064. /// expressions in loop trip counts, and to eliminate casts.
  6065. bool
  6066. ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
  6067. ICmpInst::Predicate Pred,
  6068. const SCEV *LHS, const SCEV *RHS) {
  6069. // Interpret a null as meaning no loop, where there is obviously no guard
  6070. // (interprocedural conditions notwithstanding).
  6071. if (!L) return false;
  6072. if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true;
  6073. // Starting at the loop predecessor, climb up the predecessor chain, as long
  6074. // as there are predecessors that can be found that have unique successors
  6075. // leading to the original header.
  6076. for (std::pair<BasicBlock *, BasicBlock *>
  6077. Pair(L->getLoopPredecessor(), L->getHeader());
  6078. Pair.first;
  6079. Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
  6080. BranchInst *LoopEntryPredicate =
  6081. dyn_cast<BranchInst>(Pair.first->getTerminator());
  6082. if (!LoopEntryPredicate ||
  6083. LoopEntryPredicate->isUnconditional())
  6084. continue;
  6085. if (isImpliedCond(Pred, LHS, RHS,
  6086. LoopEntryPredicate->getCondition(),
  6087. LoopEntryPredicate->getSuccessor(0) != Pair.second))
  6088. return true;
  6089. }
  6090. // Check conditions due to any @llvm.assume intrinsics.
  6091. for (auto &AssumeVH : AC->assumptions()) {
  6092. if (!AssumeVH)
  6093. continue;
  6094. auto *CI = cast<CallInst>(AssumeVH);
  6095. if (!DT->dominates(CI, L->getHeader()))
  6096. continue;
  6097. if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
  6098. return true;
  6099. }
  6100. return false;
  6101. }
  6102. /// RAII wrapper to prevent recursive application of isImpliedCond.
  6103. /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
  6104. /// currently evaluating isImpliedCond.
  6105. struct MarkPendingLoopPredicate {
  6106. Value *Cond;
  6107. DenseSet<Value*> &LoopPreds;
  6108. bool Pending;
  6109. MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
  6110. : Cond(C), LoopPreds(LP) {
  6111. Pending = !LoopPreds.insert(Cond).second;
  6112. }
  6113. ~MarkPendingLoopPredicate() {
  6114. if (!Pending)
  6115. LoopPreds.erase(Cond);
  6116. }
  6117. };
  6118. /// isImpliedCond - Test whether the condition described by Pred, LHS,
  6119. /// and RHS is true whenever the given Cond value evaluates to true.
  6120. bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
  6121. const SCEV *LHS, const SCEV *RHS,
  6122. Value *FoundCondValue,
  6123. bool Inverse) {
  6124. MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
  6125. if (Mark.Pending)
  6126. return false;
  6127. // Recursively handle And and Or conditions.
  6128. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
  6129. if (BO->getOpcode() == Instruction::And) {
  6130. if (!Inverse)
  6131. return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
  6132. isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
  6133. } else if (BO->getOpcode() == Instruction::Or) {
  6134. if (Inverse)
  6135. return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
  6136. isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
  6137. }
  6138. }
  6139. ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  6140. if (!ICI) return false;
// Now that we've found a conditional branch that dominates the loop or
// controls the loop latch, check to see if it is the comparison we are
// looking for.
  6143. ICmpInst::Predicate FoundPred;
  6144. if (Inverse)
  6145. FoundPred = ICI->getInversePredicate();
  6146. else
  6147. FoundPred = ICI->getPredicate();
  6148. const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  6149. const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
  6150. // Balance the types.
  6151. if (getTypeSizeInBits(LHS->getType()) <
  6152. getTypeSizeInBits(FoundLHS->getType())) {
  6153. if (CmpInst::isSigned(Pred)) {
  6154. LHS = getSignExtendExpr(LHS, FoundLHS->getType());
  6155. RHS = getSignExtendExpr(RHS, FoundLHS->getType());
  6156. } else {
  6157. LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
  6158. RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
  6159. }
  6160. } else if (getTypeSizeInBits(LHS->getType()) >
  6161. getTypeSizeInBits(FoundLHS->getType())) {
  6162. if (CmpInst::isSigned(FoundPred)) {
  6163. FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
  6164. FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
  6165. } else {
  6166. FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
  6167. FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
  6168. }
  6169. }
  6170. // Canonicalize the query to match the way instcombine will have
  6171. // canonicalized the comparison.
  6172. if (SimplifyICmpOperands(Pred, LHS, RHS))
  6173. if (LHS == RHS)
  6174. return CmpInst::isTrueWhenEqual(Pred);
  6175. if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
  6176. if (FoundLHS == FoundRHS)
  6177. return CmpInst::isFalseWhenEqual(FoundPred);
  6178. // Check to see if we can make the LHS or RHS match.
  6179. if (LHS == FoundRHS || RHS == FoundLHS) {
  6180. if (isa<SCEVConstant>(RHS)) {
  6181. std::swap(FoundLHS, FoundRHS);
  6182. FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
  6183. } else {
  6184. std::swap(LHS, RHS);
  6185. Pred = ICmpInst::getSwappedPredicate(Pred);
  6186. }
  6187. }
  6188. // Check whether the found predicate is the same as the desired predicate.
  6189. if (FoundPred == Pred)
  6190. return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
  6191. // Check whether swapping the found predicate makes it the same as the
  6192. // desired predicate.
  6193. if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
  6194. if (isa<SCEVConstant>(RHS))
  6195. return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
  6196. else
  6197. return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
  6198. RHS, LHS, FoundLHS, FoundRHS);
  6199. }
  6200. // Check if we can make progress by sharpening ranges.
  6201. if (FoundPred == ICmpInst::ICMP_NE &&
  6202. (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
  6203. const SCEVConstant *C = nullptr;
  6204. const SCEV *V = nullptr;
  6205. if (isa<SCEVConstant>(FoundLHS)) {
  6206. C = cast<SCEVConstant>(FoundLHS);
  6207. V = FoundRHS;
  6208. } else {
  6209. C = cast<SCEVConstant>(FoundRHS);
  6210. V = FoundLHS;
  6211. }
  6212. // The guarding predicate tells us that C != V. If the known range
  6213. // of V is [C, t), we can sharpen the range to [C + 1, t). The
// range we consider has to correspond to the same signedness as the
  6215. // predicate we're interested in folding.
  6216. APInt Min = ICmpInst::isSigned(Pred) ?
  6217. getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin();
  6218. if (Min == C->getValue()->getValue()) {
  6219. // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
  6220. // This is true even if (Min + 1) wraps around -- in case of
  6221. // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
  6222. APInt SharperMin = Min + 1;
  6223. switch (Pred) {
  6224. case ICmpInst::ICMP_SGE:
  6225. case ICmpInst::ICMP_UGE:
  6226. // We know V `Pred` SharperMin. If this implies LHS `Pred`
  6227. // RHS, we're done.
  6228. if (isImpliedCondOperands(Pred, LHS, RHS, V,
  6229. getConstant(SharperMin)))
  6230. return true;
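// If not, fall through and retry below with the weaker bound Min.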
  6231. case ICmpInst::ICMP_SGT:
  6232. case ICmpInst::ICMP_UGT:
  6233. // We know from the range information that (V `Pred` Min ||
  6234. // V == Min). We know from the guarding condition that !(V
  6235. // == Min). This gives us
  6236. //
  6237. // V `Pred` Min || V == Min && !(V == Min)
  6238. // => V `Pred` Min
  6239. //
  6240. // If V `Pred` Min implies LHS `Pred` RHS, we're done.
  6241. if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
  6242. return true;
  6243. default:
  6244. // No change
  6245. break;
  6246. }
  6247. }
  6248. }
  6249. // Check whether the actual condition is beyond sufficient.
  6250. if (FoundPred == ICmpInst::ICMP_EQ)
  6251. if (ICmpInst::isTrueWhenEqual(Pred))
  6252. if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
  6253. return true;
  6254. if (Pred == ICmpInst::ICMP_NE)
  6255. if (!ICmpInst::isTrueWhenEqual(FoundPred))
  6256. if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
  6257. return true;
  6258. // Otherwise assume the worst.
  6259. return false;
  6260. }
  6261. /// isImpliedCondOperands - Test whether the condition described by Pred,
  6262. /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
  6263. /// and FoundRHS is true.
  6264. bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
  6265. const SCEV *LHS, const SCEV *RHS,
  6266. const SCEV *FoundLHS,
  6267. const SCEV *FoundRHS) {
  6268. if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
  6269. return true;
  6270. return isImpliedCondOperandsHelper(Pred, LHS, RHS,
  6271. FoundLHS, FoundRHS) ||
  6272. // ~x < ~y --> x > y
  6273. isImpliedCondOperandsHelper(Pred, LHS, RHS,
  6274. getNotSCEV(FoundRHS),
  6275. getNotSCEV(FoundLHS));
  6276. }
/// If Expr computes ~A, return A; otherwise return nullptr. In SCEV form,
/// ~A is represented as (-1) + (-1) * A, which is the pattern matched below.
  6278. static const SCEV *MatchNotExpr(const SCEV *Expr) {
  6279. const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  6280. if (!Add || Add->getNumOperands() != 2) return nullptr;
  6281. const SCEVConstant *AddLHS = dyn_cast<SCEVConstant>(Add->getOperand(0));
  6282. if (!(AddLHS && AddLHS->getValue()->getValue().isAllOnesValue()))
  6283. return nullptr;
  6284. const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  6285. if (!AddRHS || AddRHS->getNumOperands() != 2) return nullptr;
  6286. const SCEVConstant *MulLHS = dyn_cast<SCEVConstant>(AddRHS->getOperand(0));
  6287. if (!(MulLHS && MulLHS->getValue()->getValue().isAllOnesValue()))
  6288. return nullptr;
  6289. return AddRHS->getOperand(1);
  6290. }
  6291. /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
  6292. template<typename MaxExprType>
  6293. static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
  6294. const SCEV *Candidate) {
  6295. const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  6296. if (!MaxExpr) return false;
  6297. auto It = std::find(MaxExpr->op_begin(), MaxExpr->op_end(), Candidate);
  6298. return It != MaxExpr->op_end();
  6299. }
  6300. /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
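/// SCEV has no dedicated min expressions: smin(A, B) is represented as
/// ~smax(~A, ~B) (and likewise for umin/umax), so this strips the outer "not"
/// and then looks for ~Candidate among the max's operands.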
  6301. template<typename MaxExprType>
  6302. static bool IsMinConsistingOf(ScalarEvolution &SE,
  6303. const SCEV *MaybeMinExpr,
  6304. const SCEV *Candidate) {
  6305. const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  6306. if (!MaybeMaxExpr)
  6307. return false;
  6308. return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
  6309. }
/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
  6311. /// expression?
  6312. static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
  6313. ICmpInst::Predicate Pred,
  6314. const SCEV *LHS, const SCEV *RHS) {
  6315. switch (Pred) {
  6316. default:
  6317. return false;
  6318. case ICmpInst::ICMP_SGE:
  6319. std::swap(LHS, RHS);
  6320. // fall through
  6321. case ICmpInst::ICMP_SLE:
  6322. return
  6323. // min(A, ...) <= A
  6324. IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
  6325. // A <= max(A, ...)
  6326. IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
  6327. case ICmpInst::ICMP_UGE:
  6328. std::swap(LHS, RHS);
  6329. // fall through
  6330. case ICmpInst::ICMP_ULE:
  6331. return
  6332. // min(A, ...) <= A
  6333. IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
  6334. // A <= max(A, ...)
  6335. IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  6336. }
  6337. llvm_unreachable("covered switch fell through?!");
  6338. }
  6339. /// isImpliedCondOperandsHelper - Test whether the condition described by
  6340. /// Pred, LHS, and RHS is true whenever the condition described by Pred,
  6341. /// FoundLHS, and FoundRHS is true.
  6342. bool
  6343. ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
  6344. const SCEV *LHS, const SCEV *RHS,
  6345. const SCEV *FoundLHS,
  6346. const SCEV *FoundRHS) {
  6347. auto IsKnownPredicateFull =
  6348. [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  6349. return isKnownPredicateWithRanges(Pred, LHS, RHS) ||
  6350. IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS);
  6351. };
  6352. switch (Pred) {
  6353. default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  6354. case ICmpInst::ICMP_EQ:
  6355. case ICmpInst::ICMP_NE:
  6356. if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
  6357. return true;
  6358. break;
  6359. case ICmpInst::ICMP_SLT:
  6360. case ICmpInst::ICMP_SLE:
  6361. if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
  6362. IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS))
  6363. return true;
  6364. break;
  6365. case ICmpInst::ICMP_SGT:
  6366. case ICmpInst::ICMP_SGE:
  6367. if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
  6368. IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS))
  6369. return true;
  6370. break;
  6371. case ICmpInst::ICMP_ULT:
  6372. case ICmpInst::ICMP_ULE:
  6373. if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
  6374. IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS))
  6375. return true;
  6376. break;
  6377. case ICmpInst::ICMP_UGT:
  6378. case ICmpInst::ICMP_UGE:
  6379. if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
  6380. IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS))
  6381. return true;
  6382. break;
  6383. }
  6384. return false;
  6385. }
/// isImpliedCondOperandsViaRanges - Helper function for isImpliedCondOperands.
/// Tries to prove implications like "X `sgt` 0 => X - 1 `sgt` -1".
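///
/// For the example above, FoundLHS = X, FoundRHS = 0 and Pred = `sgt` give a
/// range of [1, SINT_MAX] for X; adding the -1 addend yields [0, SINT_MAX - 1]
/// for LHS = X - 1, and every value in that range satisfies LHS `sgt` -1, so
/// the implication holds.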
  6388. bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
  6389. const SCEV *LHS,
  6390. const SCEV *RHS,
  6391. const SCEV *FoundLHS,
  6392. const SCEV *FoundRHS) {
  6393. if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
// The restriction on `FoundRHS` can be lifted easily -- it exists only to
  6395. // reduce the compile time impact of this optimization.
  6396. return false;
  6397. const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS);
  6398. if (!AddLHS || AddLHS->getOperand(1) != FoundLHS ||
  6399. !isa<SCEVConstant>(AddLHS->getOperand(0)))
  6400. return false;
  6401. APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getValue()->getValue();
  6402. // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  6403. // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  6404. ConstantRange FoundLHSRange =
  6405. ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
  6406. // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range
  6407. // for `LHS`:
  6408. APInt Addend =
  6409. cast<SCEVConstant>(AddLHS->getOperand(0))->getValue()->getValue();
  6410. ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend));
  6411. // We can also compute the range of values for `LHS` that satisfy the
  6412. // consequent, "`LHS` `Pred` `RHS`":
  6413. APInt ConstRHS = cast<SCEVConstant>(RHS)->getValue()->getValue();
  6414. ConstantRange SatisfyingLHSRange =
  6415. ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
  6416. // The antecedent implies the consequent if every value of `LHS` that
  6417. // satisfies the antecedent also satisfies the consequent.
  6418. return SatisfyingLHSRange.contains(LHSRange);
  6419. }
// Verify whether a linear IV with a positive stride can overflow in a
// less-than comparison, given the invariant term of the comparison, the
// stride, and the NSW/NUW flags on the recurrence.
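//
// For example (unsigned, 8-bit): if RHS can be as large as 250 and the stride
// as large as 10, the IV may legally reach 249 (still < RHS) and then step to
// 259, which wraps; the check below flags this since 255 - 9 = 246 < 250.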
  6423. bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
  6424. bool IsSigned, bool NoWrap) {
  6425. if (NoWrap) return false;
  6426. unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  6427. const SCEV *One = getConstant(Stride->getType(), 1);
  6428. if (IsSigned) {
  6429. APInt MaxRHS = getSignedRange(RHS).getSignedMax();
  6430. APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
  6431. APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
  6432. .getSignedMax();
  6433. // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
  6434. return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
  6435. }
  6436. APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
  6437. APInt MaxValue = APInt::getMaxValue(BitWidth);
  6438. APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
  6439. .getUnsignedMax();
  6440. // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  6441. return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
  6442. }
// Verify whether a linear IV with a negative stride can overflow in a
// greater-than comparison, given the invariant term of the comparison,
// the stride, and the NSW/NUW flags on the recurrence.
  6446. bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
  6447. bool IsSigned, bool NoWrap) {
  6448. if (NoWrap) return false;
  6449. unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  6450. const SCEV *One = getConstant(Stride->getType(), 1);
  6451. if (IsSigned) {
  6452. APInt MinRHS = getSignedRange(RHS).getSignedMin();
  6453. APInt MinValue = APInt::getSignedMinValue(BitWidth);
  6454. APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
  6455. .getSignedMax();
  6456. // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
  6457. return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
  6458. }
  6459. APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
  6460. APInt MinValue = APInt::getMinValue(BitWidth);
  6461. APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
  6462. .getUnsignedMax();
  6463. // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  6464. return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
  6465. }
  6466. // Compute the backedge taken count knowing the interval difference, the
  6467. // stride and presence of the equality in the comparison.
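//
// Without the equality this computes ceil(Delta / Step): the increment that
// crosses the bound is the last one taken. With the equality it computes
// floor(Delta / Step) + 1, since the iteration where the IV equals the bound
// also takes the backedge.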
  6468. const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
  6469. bool Equality) {
  6470. const SCEV *One = getConstant(Step->getType(), 1);
  6471. Delta = Equality ? getAddExpr(Delta, Step)
  6472. : getAddExpr(Delta, getMinusSCEV(Step, One));
  6473. return getUDivExpr(Delta, Step);
  6474. }
  6475. /// HowManyLessThans - Return the number of times a backedge containing the
  6476. /// specified less-than comparison will execute. If not computable, return
  6477. /// CouldNotCompute.
  6478. ///
  6479. /// @param ControlsExit is true when the LHS < RHS condition directly controls
/// the branch (the loop exits only if the condition is true). In this case, we
/// can use
  6481. /// NoWrapFlags to skip overflow checks.
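///
/// For example, for "for (i = Start; i < RHS; i += Stride)" with a positive
/// constant Stride, the exact count computed below is
/// (End - Start + Stride - 1) /u Stride, where End is RHS (or max(RHS, Start)
/// when the loop is not known to be entered); e.g. Start = 0, Stride = 4 and
/// RHS = 10 give (10 + 3) /u 4 = 3 backedge executions.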
  6482. ScalarEvolution::ExitLimit
  6483. ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
  6484. const Loop *L, bool IsSigned,
  6485. bool ControlsExit) {
  6486. // We handle only IV < Invariant
  6487. if (!isLoopInvariant(RHS, L))
  6488. return getCouldNotCompute();
  6489. const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  6490. // Avoid weird loops
  6491. if (!IV || IV->getLoop() != L || !IV->isAffine())
  6492. return getCouldNotCompute();
  6493. bool NoWrap = ControlsExit &&
  6494. IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
  6495. const SCEV *Stride = IV->getStepRecurrence(*this);
  6496. // Avoid negative or zero stride values
  6497. if (!isKnownPositive(Stride))
  6498. return getCouldNotCompute();
// Avoid proven overflow cases: this will ensure that the backedge taken count
// will not generate any unsigned overflow. Relaxed no-overflow conditions
// exploit NoWrapFlags, allowing optimization in the presence of undefined
// behavior, as in C.
  6503. if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
  6504. return getCouldNotCompute();
  6505. ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
  6506. : ICmpInst::ICMP_ULT;
  6507. const SCEV *Start = IV->getStart();
  6508. const SCEV *End = RHS;
  6509. if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) {
  6510. const SCEV *Diff = getMinusSCEV(RHS, Start);
  6511. // If we have NoWrap set, then we can assume that the increment won't
  6512. // overflow, in which case if RHS - Start is a constant, we don't need to
  6513. // do a max operation since we can just figure it out statically
  6514. if (NoWrap && isa<SCEVConstant>(Diff)) {
APInt D = cast<SCEVConstant>(Diff)->getValue()->getValue();
  6516. if (D.isNegative())
  6517. End = Start;
  6518. } else
  6519. End = IsSigned ? getSMaxExpr(RHS, Start)
  6520. : getUMaxExpr(RHS, Start);
  6521. }
  6522. const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  6523. APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
  6524. : getUnsignedRange(Start).getUnsignedMin();
  6525. APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
  6526. : getUnsignedRange(Stride).getUnsignedMin();
  6527. unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  6528. APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
  6529. : APInt::getMaxValue(BitWidth) - (MinStride - 1);
  6530. // Although End can be a MAX expression we estimate MaxEnd considering only
  6531. // the case End = RHS. This is safe because in the other case (End - Start)
  6532. // is zero, leading to a zero maximum backedge taken count.
  6533. APInt MaxEnd =
  6534. IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
  6535. : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
  6536. const SCEV *MaxBECount;
  6537. if (isa<SCEVConstant>(BECount))
  6538. MaxBECount = BECount;
  6539. else
  6540. MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
  6541. getConstant(MinStride), false);
  6542. if (isa<SCEVCouldNotCompute>(MaxBECount))
  6543. MaxBECount = BECount;
  6544. return ExitLimit(BECount, MaxBECount);
  6545. }
  6546. ScalarEvolution::ExitLimit
  6547. ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
  6548. const Loop *L, bool IsSigned,
  6549. bool ControlsExit) {
  6550. // We handle only IV > Invariant
  6551. if (!isLoopInvariant(RHS, L))
  6552. return getCouldNotCompute();
  6553. const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  6554. // Avoid weird loops
  6555. if (!IV || IV->getLoop() != L || !IV->isAffine())
  6556. return getCouldNotCompute();
  6557. bool NoWrap = ControlsExit &&
  6558. IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
  6559. const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
  6560. // Avoid negative or zero stride values
  6561. if (!isKnownPositive(Stride))
  6562. return getCouldNotCompute();
// Avoid proven overflow cases: this will ensure that the backedge taken count
// will not generate any unsigned overflow. Relaxed no-overflow conditions
// exploit NoWrapFlags, allowing optimization in the presence of undefined
// behavior, as in C.
  6567. if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
  6568. return getCouldNotCompute();
  6569. ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
  6570. : ICmpInst::ICMP_UGT;
  6571. const SCEV *Start = IV->getStart();
  6572. const SCEV *End = RHS;
  6573. if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
  6574. const SCEV *Diff = getMinusSCEV(RHS, Start);
  6575. // If we have NoWrap set, then we can assume that the increment won't
  6576. // overflow, in which case if RHS - Start is a constant, we don't need to
  6577. // do a max operation since we can just figure it out statically
  6578. if (NoWrap && isa<SCEVConstant>(Diff)) {
APInt D = cast<SCEVConstant>(Diff)->getValue()->getValue();
  6580. if (!D.isNegative())
  6581. End = Start;
  6582. } else
  6583. End = IsSigned ? getSMinExpr(RHS, Start)
  6584. : getUMinExpr(RHS, Start);
  6585. }
  6586. const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
  6587. APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
  6588. : getUnsignedRange(Start).getUnsignedMax();
  6589. APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
  6590. : getUnsignedRange(Stride).getUnsignedMin();
  6591. unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  6592. APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
  6593. : APInt::getMinValue(BitWidth) + (MinStride - 1);
  6594. // Although End can be a MIN expression we estimate MinEnd considering only
  6595. // the case End = RHS. This is safe because in the other case (Start - End)
  6596. // is zero, leading to a zero maximum backedge taken count.
  6597. APInt MinEnd =
  6598. IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
  6599. : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
  6600. const SCEV *MaxBECount = getCouldNotCompute();
  6601. if (isa<SCEVConstant>(BECount))
  6602. MaxBECount = BECount;
  6603. else
  6604. MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
  6605. getConstant(MinStride), false);
  6606. if (isa<SCEVCouldNotCompute>(MaxBECount))
  6607. MaxBECount = BECount;
  6608. return ExitLimit(BECount, MaxBECount);
  6609. }
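// Worked example (illustrative only, not drawn from a specific testcase): for
// an entry-guarded loop with IV = {10,+,-3} and exit test "IV > 1" (signed),
// Stride = 3, Start = 10 and End = RHS = 1, so the rounding-up division done
// by computeBECount gives
//   BECount = ceil((Start - End) / Stride) = ceil((10 - 1) / 3) = 3,
// matching the three backedges taken for IV = 10, 7 and 4 before IV reaches 1.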
  6610. /// getNumIterationsInRange - Return the number of iterations of this loop that
  6611. /// produce values in the specified constant range. Another way of looking at
6612. /// this is that it returns the first iteration number where the value is not in
6613. /// the range, thus computing the exit count. If the iteration count can't
  6614. /// be computed, an instance of SCEVCouldNotCompute is returned.
  6615. const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
  6616. ScalarEvolution &SE) const {
  6617. if (Range.isFullSet()) // Infinite loop.
  6618. return SE.getCouldNotCompute();
  6619. // If the start is a non-zero constant, shift the range to simplify things.
  6620. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
  6621. if (!SC->getValue()->isZero()) {
  6622. SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
  6623. Operands[0] = SE.getConstant(SC->getType(), 0);
  6624. const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
  6625. getNoWrapFlags(FlagNW));
  6626. if (const SCEVAddRecExpr *ShiftedAddRec =
  6627. dyn_cast<SCEVAddRecExpr>(Shifted))
  6628. return ShiftedAddRec->getNumIterationsInRange(
  6629. Range.subtract(SC->getValue()->getValue()), SE);
  6630. // This is strange and shouldn't happen.
  6631. return SE.getCouldNotCompute();
  6632. }
  6633. // The only time we can solve this is when we have all constant indices.
  6634. // Otherwise, we cannot determine the overflow conditions.
  6635. for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
  6636. if (!isa<SCEVConstant>(getOperand(i)))
  6637. return SE.getCouldNotCompute();
  6638. // Okay at this point we know that all elements of the chrec are constants and
  6639. // that the start element is zero.
  6640. // First check to see if the range contains zero. If not, the first
  6641. // iteration exits.
  6642. unsigned BitWidth = SE.getTypeSizeInBits(getType());
  6643. if (!Range.contains(APInt(BitWidth, 0)))
  6644. return SE.getConstant(getType(), 0);
  6645. if (isAffine()) {
  6646. // If this is an affine expression then we have this situation:
  6647. // Solve {0,+,A} in Range === Ax in Range
  6648. // We know that zero is in the range. If A is positive then we know that
  6649. // the upper value of the range must be the first possible exit value.
  6650. // If A is negative then the lower of the range is the last possible loop
  6651. // value. Also note that we already checked for a full range.
6652. APInt One(BitWidth, 1);
  6653. APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
  6654. APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
  6655. // The exit value should be (End+A)/A.
  6656. APInt ExitVal = (End + A).udiv(A);
  6657. ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
  6658. // Evaluate at the exit value. If we really did fall out of the valid
  6659. // range, then we computed our trip count, otherwise wrap around or other
  6660. // things must have happened.
  6661. ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
  6662. if (Range.contains(Val->getValue()))
  6663. return SE.getCouldNotCompute(); // Something strange happened
  6664. // Ensure that the previous value is in the range. This is a sanity check.
  6665. assert(Range.contains(
  6666. EvaluateConstantChrecAtConstant(this,
  6667. ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
  6668. "Linear scev computation is off in a bad way!");
  6669. return SE.getConstant(ExitValue);
  6670. } else if (isQuadratic()) {
  6671. // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
  6672. // quadratic equation to solve it. To do this, we must frame our problem in
  6673. // terms of figuring out when zero is crossed, instead of when
  6674. // Range.getUpper() is crossed.
  6675. SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
  6676. NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
  6677. const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
  6678. // getNoWrapFlags(FlagNW)
  6679. FlagAnyWrap);
  6680. // Next, solve the constructed addrec
  6681. std::pair<const SCEV *,const SCEV *> Roots =
  6682. SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
  6683. const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
  6684. const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
  6685. if (R1) {
  6686. // Pick the smallest positive root value.
  6687. if (ConstantInt *CB =
  6688. dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
  6689. R1->getValue(), R2->getValue()))) {
  6690. if (!CB->getZExtValue())
  6691. std::swap(R1, R2); // R1 is the minimum root now.
  6692. // Make sure the root is not off by one. The returned iteration should
  6693. // not be in the range, but the previous one should be. When solving
  6694. // for "X*X < 5", for example, we should not return a root of 2.
  6695. ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
  6696. R1->getValue(),
  6697. SE);
  6698. if (Range.contains(R1Val->getValue())) {
  6699. // The next iteration must be out of the range...
  6700. ConstantInt *NextVal =
  6701. ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
  6702. R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
  6703. if (!Range.contains(R1Val->getValue()))
  6704. return SE.getConstant(NextVal);
  6705. return SE.getCouldNotCompute(); // Something strange happened
  6706. }
  6707. // If R1 was not in the range, then it is a good return value. Make
  6708. // sure that R1-1 WAS in the range though, just in case.
  6709. ConstantInt *NextVal =
  6710. ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
  6711. R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
  6712. if (Range.contains(R1Val->getValue()))
  6713. return R1;
  6714. return SE.getCouldNotCompute(); // Something strange happened
  6715. }
  6716. }
  6717. }
  6718. return SE.getCouldNotCompute();
  6719. }
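// Worked example for the affine case (illustrative only): for the chrec
// {0,+,3} and Range = [0, 10), A = 3 is positive, so End = Upper - 1 = 9 and
// ExitVal = (End + A) /u A = 12 /u 3 = 4. Evaluating the chrec at 4 gives 12,
// which is outside the range, while iteration 3 gives 9, which is inside, so
// the first out-of-range iteration (and hence the result) is 4.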
  6720. namespace {
  6721. struct FindUndefs {
  6722. bool Found;
  6723. FindUndefs() : Found(false) {}
  6724. bool follow(const SCEV *S) {
  6725. if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) {
  6726. if (isa<UndefValue>(C->getValue()))
  6727. Found = true;
  6728. } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
  6729. if (isa<UndefValue>(C->getValue()))
  6730. Found = true;
  6731. }
  6732. // Keep looking if we haven't found it yet.
  6733. return !Found;
  6734. }
  6735. bool isDone() const {
  6736. // Stop recursion if we have found an undef.
  6737. return Found;
  6738. }
  6739. };
  6740. }
6741. // Return true when S contains at least one undef value.
  6742. static inline bool
  6743. containsUndefs(const SCEV *S) {
  6744. FindUndefs F;
  6745. SCEVTraversal<FindUndefs> ST(F);
  6746. ST.visitAll(S);
  6747. return F.Found;
  6748. }
  6749. namespace {
  6750. // Collect all steps of SCEV expressions.
  6751. struct SCEVCollectStrides {
  6752. ScalarEvolution &SE;
  6753. SmallVectorImpl<const SCEV *> &Strides;
  6754. SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
  6755. : SE(SE), Strides(S) {}
  6756. bool follow(const SCEV *S) {
  6757. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
  6758. Strides.push_back(AR->getStepRecurrence(SE));
  6759. return true;
  6760. }
  6761. bool isDone() const { return false; }
  6762. };
  6763. // Collect all SCEVUnknown and SCEVMulExpr expressions.
  6764. struct SCEVCollectTerms {
  6765. SmallVectorImpl<const SCEV *> &Terms;
  6766. SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
  6767. : Terms(T) {}
  6768. bool follow(const SCEV *S) {
  6769. if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) {
  6770. if (!containsUndefs(S))
  6771. Terms.push_back(S);
  6772. // Stop recursion: once we collected a term, do not walk its operands.
  6773. return false;
  6774. }
  6775. // Keep looking.
  6776. return true;
  6777. }
  6778. bool isDone() const { return false; }
  6779. };
  6780. }
  6781. /// Find parametric terms in this SCEVAddRecExpr.
  6782. void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
  6783. SmallVectorImpl<const SCEV *> &Terms) {
  6784. SmallVector<const SCEV *, 4> Strides;
  6785. SCEVCollectStrides StrideCollector(*this, Strides);
  6786. visitAll(Expr, StrideCollector);
  6787. DEBUG({
  6788. dbgs() << "Strides:\n";
  6789. for (const SCEV *S : Strides)
  6790. dbgs() << *S << "\n";
  6791. });
  6792. for (const SCEV *S : Strides) {
  6793. SCEVCollectTerms TermCollector(Terms);
  6794. visitAll(S, TermCollector);
  6795. }
  6796. DEBUG({
  6797. dbgs() << "Terms:\n";
  6798. for (const SCEV *T : Terms)
  6799. dbgs() << *T << "\n";
  6800. });
  6801. }
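// For instance (reusing the example from the delinearize documentation below),
// given the access function
//   {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
// the collected strides are (8 * %m * %o), (8 * %o) and 8, and the collected
// terms are the two parametric products (8 * %m * %o) and (8 * %o); the
// constant stride 8 contributes no term.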
  6802. static bool findArrayDimensionsRec(ScalarEvolution &SE,
  6803. SmallVectorImpl<const SCEV *> &Terms,
  6804. SmallVectorImpl<const SCEV *> &Sizes) {
  6805. int Last = Terms.size() - 1;
  6806. const SCEV *Step = Terms[Last];
  6807. // End of recursion.
  6808. if (Last == 0) {
  6809. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
  6810. SmallVector<const SCEV *, 2> Qs;
  6811. for (const SCEV *Op : M->operands())
  6812. if (!isa<SCEVConstant>(Op))
  6813. Qs.push_back(Op);
  6814. Step = SE.getMulExpr(Qs);
  6815. }
  6816. Sizes.push_back(Step);
  6817. return true;
  6818. }
  6819. for (const SCEV *&Term : Terms) {
  6820. // Normalize the terms before the next call to findArrayDimensionsRec.
  6821. const SCEV *Q, *R;
  6822. SCEVDivision::divide(SE, Term, Step, &Q, &R);
  6823. // Bail out when GCD does not evenly divide one of the terms.
  6824. if (!R->isZero())
  6825. return false;
  6826. Term = Q;
  6827. }
  6828. // Remove all SCEVConstants.
  6829. Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) {
  6830. return isa<SCEVConstant>(E);
  6831. }),
  6832. Terms.end());
  6833. if (Terms.size() > 0)
  6834. if (!findArrayDimensionsRec(SE, Terms, Sizes))
  6835. return false;
  6836. Sizes.push_back(Step);
  6837. return true;
  6838. }
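// Worked example (continuing the case above): with Terms = {%m * %o, %o},
// Step is the last term %o. Dividing each term by %o gives {%m, 1}; the
// constant 1 is erased and the recursion runs on {%m}, which is pushed as the
// outermost known size. Unwinding then pushes %o, so Sizes = {%m, %o} (the
// element size is appended later by findArrayDimensions).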
  6839. namespace {
  6840. struct FindParameter {
  6841. bool FoundParameter;
  6842. FindParameter() : FoundParameter(false) {}
  6843. bool follow(const SCEV *S) {
  6844. if (isa<SCEVUnknown>(S)) {
  6845. FoundParameter = true;
  6846. // Stop recursion: we found a parameter.
  6847. return false;
  6848. }
  6849. // Keep looking.
  6850. return true;
  6851. }
  6852. bool isDone() const {
  6853. // Stop recursion if we have found a parameter.
  6854. return FoundParameter;
  6855. }
  6856. };
  6857. }
6858. // Returns true when S contains at least one SCEVUnknown parameter.
  6859. static inline bool
  6860. containsParameters(const SCEV *S) {
  6861. FindParameter F;
  6862. SCEVTraversal<FindParameter> ST(F);
  6863. ST.visitAll(S);
  6864. return F.FoundParameter;
  6865. }
6866. // Returns true when one of the SCEVs in Terms contains a SCEVUnknown parameter.
  6867. static inline bool
  6868. containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  6869. for (const SCEV *T : Terms)
  6870. if (containsParameters(T))
  6871. return true;
  6872. return false;
  6873. }
  6874. // Return the number of product terms in S.
  6875. static inline int numberOfTerms(const SCEV *S) {
  6876. if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
  6877. return Expr->getNumOperands();
  6878. return 1;
  6879. }
  6880. static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  6881. if (isa<SCEVConstant>(T))
  6882. return nullptr;
  6883. if (isa<SCEVUnknown>(T))
  6884. return T;
  6885. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
  6886. SmallVector<const SCEV *, 2> Factors;
  6887. for (const SCEV *Op : M->operands())
  6888. if (!isa<SCEVConstant>(Op))
  6889. Factors.push_back(Op);
  6890. return SE.getMulExpr(Factors);
  6891. }
  6892. return T;
  6893. }
  6894. /// Return the size of an element read or written by Inst.
  6895. const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  6896. Type *Ty;
  6897. if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
  6898. Ty = Store->getValueOperand()->getType();
  6899. else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
  6900. Ty = Load->getType();
  6901. else
  6902. return nullptr;
  6903. Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  6904. return getSizeOfExpr(ETy, Ty);
  6905. }
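// For example, for a "store double %v, double* %p" this returns the SCEV
// constant 8 (sizeof(double), assuming a standard data layout where double
// occupies 8 bytes); delinearization later divides all collected terms by it.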
  6906. /// Second step of delinearization: compute the array dimensions Sizes from the
  6907. /// set of Terms extracted from the memory access function of this SCEVAddRec.
  6908. void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
  6909. SmallVectorImpl<const SCEV *> &Sizes,
  6910. const SCEV *ElementSize) const {
  6911. if (Terms.size() < 1 || !ElementSize)
  6912. return;
6913. // Early return when Terms do not contain parameters: we do not delinearize
6914. // non-parametric SCEVs.
  6915. if (!containsParameters(Terms))
  6916. return;
  6917. DEBUG({
  6918. dbgs() << "Terms:\n";
  6919. for (const SCEV *T : Terms)
  6920. dbgs() << *T << "\n";
  6921. });
  6922. // Remove duplicates.
  6923. std::sort(Terms.begin(), Terms.end());
  6924. Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
  6925. // Put larger terms first.
  6926. std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
  6927. return numberOfTerms(LHS) > numberOfTerms(RHS);
  6928. });
  6929. ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  6930. // Divide all terms by the element size.
  6931. for (const SCEV *&Term : Terms) {
  6932. const SCEV *Q, *R;
  6933. SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
  6934. Term = Q;
  6935. }
  6936. SmallVector<const SCEV *, 4> NewTerms;
  6937. // Remove constant factors.
  6938. for (const SCEV *T : Terms)
  6939. if (const SCEV *NewT = removeConstantFactors(SE, T))
  6940. NewTerms.push_back(NewT);
  6941. DEBUG({
  6942. dbgs() << "Terms after sorting:\n";
  6943. for (const SCEV *T : NewTerms)
  6944. dbgs() << *T << "\n";
  6945. });
  6946. if (NewTerms.empty() ||
  6947. !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
  6948. Sizes.clear();
  6949. return;
  6950. }
  6951. // The last element to be pushed into Sizes is the size of an element.
  6952. Sizes.push_back(ElementSize);
  6953. DEBUG({
  6954. dbgs() << "Sizes:\n";
  6955. for (const SCEV *S : Sizes)
  6956. dbgs() << *S << "\n";
  6957. });
  6958. }
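// Continuing the running example: with Terms = {(8 * %m * %o), (8 * %o)} and
// ElementSize = 8, dividing by the element size and dropping constant factors
// yields {%m * %o, %o}, and findArrayDimensionsRec then produces
// Sizes = {%m, %o, 8}, the trailing 8 being the element size pushed above.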
  6959. /// Third step of delinearization: compute the access functions for the
  6960. /// Subscripts based on the dimensions in Sizes.
  6961. void ScalarEvolution::computeAccessFunctions(
  6962. const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
  6963. SmallVectorImpl<const SCEV *> &Sizes) {
  6964. // Early exit in case this SCEV is not an affine multivariate function.
  6965. if (Sizes.empty())
  6966. return;
  6967. if (auto AR = dyn_cast<SCEVAddRecExpr>(Expr))
  6968. if (!AR->isAffine())
  6969. return;
  6970. const SCEV *Res = Expr;
  6971. int Last = Sizes.size() - 1;
  6972. for (int i = Last; i >= 0; i--) {
  6973. const SCEV *Q, *R;
  6974. SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
  6975. DEBUG({
  6976. dbgs() << "Res: " << *Res << "\n";
  6977. dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
  6978. dbgs() << "Res divided by Sizes[i]:\n";
  6979. dbgs() << "Quotient: " << *Q << "\n";
  6980. dbgs() << "Remainder: " << *R << "\n";
  6981. });
  6982. Res = Q;
  6983. // Do not record the last subscript corresponding to the size of elements in
  6984. // the array.
  6985. if (i == Last) {
  6986. // Bail out if the remainder is too complex.
  6987. if (isa<SCEVAddRecExpr>(R)) {
  6988. Subscripts.clear();
  6989. Sizes.clear();
  6990. return;
  6991. }
  6992. continue;
  6993. }
  6994. // Record the access function for the current subscript.
  6995. Subscripts.push_back(R);
  6996. }
6997. // Also push, in last position, the quotient of the last division: after the
6998. // reversal below it becomes the subscript of the outermost array dimension.
  6999. Subscripts.push_back(Res);
  7000. std::reverse(Subscripts.begin(), Subscripts.end());
  7001. DEBUG({
  7002. dbgs() << "Subscripts:\n";
  7003. for (const SCEV *S : Subscripts)
  7004. dbgs() << *S << "\n";
  7005. });
  7006. }
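// On the running example with Sizes = {%m, %o, 8}, the successive divisions
// peel off one dimension per step, and the recorded remainders plus the final
// quotient, once reversed, become the subscripts
//   [{0,+,1}<%for.i>, {0,+,1}<%for.j>, {0,+,1}<%for.k>]
// as shown in the delinearize documentation below.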
7007. /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
7008. /// the sizes of an array access.
7009. ///
7010. /// The delinearization algorithm pattern-matches sub-expressions in the stride
7011. /// and base of the SCEV, effectively computing a GCD (greatest common divisor)
7012. /// of the base and the strides. When the delinearization fails, the Subscripts
7013. /// and Sizes vectors are left empty.
  7014. ///
  7015. /// For example: when analyzing the memory access A[i][j][k] in this loop nest
  7016. ///
  7017. /// void foo(long n, long m, long o, double A[n][m][o]) {
  7018. ///
  7019. /// for (long i = 0; i < n; i++)
  7020. /// for (long j = 0; j < m; j++)
  7021. /// for (long k = 0; k < o; k++)
  7022. /// A[i][j][k] = 1.0;
  7023. /// }
  7024. ///
  7025. /// the delinearization input is the following AddRec SCEV:
  7026. ///
  7027. /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
  7028. ///
  7029. /// From this SCEV, we are able to say that the base offset of the access is %A
  7030. /// because it appears as an offset that does not divide any of the strides in
  7031. /// the loops:
  7032. ///
  7033. /// CHECK: Base offset: %A
  7034. ///
  7035. /// and then SCEV->delinearize determines the size of some of the dimensions of
  7036. /// the array as these are the multiples by which the strides are happening:
  7037. ///
  7038. /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
  7039. ///
7040. /// Note that the outermost dimension remains of UnknownSize because there are
7041. /// no strides that would help identify the size of that dimension: when
  7042. /// the array has been statically allocated, one could compute the size of that
  7043. /// dimension by dividing the overall size of the array by the size of the known
  7044. /// dimensions: %m * %o * 8.
  7045. ///
  7046. /// Finally delinearize provides the access functions for the array reference
  7047. /// that does correspond to A[i][j][k] of the above C testcase:
  7048. ///
  7049. /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
  7050. ///
7051. /// The testcases check the output of a function pass, DelinearizationPass,
7052. /// which walks through all loads and stores of a function, asks for the SCEV of
7053. /// each memory access with respect to all enclosing loops, calls delinearize on
7054. /// that SCEV, and prints the results.
  7055. void ScalarEvolution::delinearize(const SCEV *Expr,
  7056. SmallVectorImpl<const SCEV *> &Subscripts,
  7057. SmallVectorImpl<const SCEV *> &Sizes,
  7058. const SCEV *ElementSize) {
  7059. // First step: collect parametric terms.
  7060. SmallVector<const SCEV *, 4> Terms;
  7061. collectParametricTerms(Expr, Terms);
  7062. if (Terms.empty())
  7063. return;
  7064. // Second step: find subscript sizes.
  7065. findArrayDimensions(Terms, Sizes, ElementSize);
  7066. if (Sizes.empty())
  7067. return;
  7068. // Third step: compute the access functions for each subscript.
  7069. computeAccessFunctions(Expr, Subscripts, Sizes);
  7070. if (Subscripts.empty())
  7071. return;
  7072. DEBUG({
  7073. dbgs() << "succeeded to delinearize " << *Expr << "\n";
  7074. dbgs() << "ArrayDecl[UnknownSize]";
  7075. for (const SCEV *S : Sizes)
  7076. dbgs() << "[" << *S << "]";
  7077. dbgs() << "\nArrayRef";
  7078. for (const SCEV *S : Subscripts)
  7079. dbgs() << "[" << *S << "]";
  7080. dbgs() << "\n";
  7081. });
  7082. }
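// A minimal usage sketch (assumed client code, not part of this file; GEP,
// Inst, L and SE are hypothetical locals of a pass):
//
//   const SCEV *AccessFn = SE->getSCEVAtScope(SE->getSCEV(GEP), L);
//   SmallVector<const SCEV *, 3> Subscripts, Sizes;
//   SE->delinearize(AccessFn, Subscripts, Sizes, SE->getElementSize(Inst));
//   if (Subscripts.empty() || Sizes.empty())
//     return; // Delinearization failed.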
  7083. //===----------------------------------------------------------------------===//
  7084. // SCEVCallbackVH Class Implementation
  7085. //===----------------------------------------------------------------------===//
  7086. void ScalarEvolution::SCEVCallbackVH::deleted() {
  7087. assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  7088. if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
  7089. SE->ConstantEvolutionLoopExitValue.erase(PN);
  7090. SE->ValueExprMap.erase(getValPtr());
  7091. // this now dangles!
  7092. }
  7093. void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  7094. assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  7095. // Forget all the expressions associated with users of the old value,
  7096. // so that future queries will recompute the expressions using the new
  7097. // value.
  7098. Value *Old = getValPtr();
  7099. SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  7100. SmallPtrSet<User *, 8> Visited;
  7101. while (!Worklist.empty()) {
  7102. User *U = Worklist.pop_back_val();
  7103. // Deleting the Old value will cause this to dangle. Postpone
  7104. // that until everything else is done.
  7105. if (U == Old)
  7106. continue;
  7107. if (!Visited.insert(U).second)
  7108. continue;
  7109. if (PHINode *PN = dyn_cast<PHINode>(U))
  7110. SE->ConstantEvolutionLoopExitValue.erase(PN);
  7111. SE->ValueExprMap.erase(U);
  7112. Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  7113. }
  7114. // Delete the Old value.
  7115. if (PHINode *PN = dyn_cast<PHINode>(Old))
  7116. SE->ConstantEvolutionLoopExitValue.erase(PN);
  7117. SE->ValueExprMap.erase(Old);
  7118. // this now dangles!
  7119. }
  7120. ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  7121. : CallbackVH(V), SE(se) {}
  7122. //===----------------------------------------------------------------------===//
  7123. // ScalarEvolution Class Implementation
  7124. //===----------------------------------------------------------------------===//
  7125. ScalarEvolution::ScalarEvolution()
  7126. : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64),
  7127. LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) {
  7128. initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
  7129. }
  7130. bool ScalarEvolution::runOnFunction(Function &F) {
  7131. this->F = &F;
  7132. AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  7133. LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  7134. TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  7135. DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  7136. return false;
  7137. }
  7138. void ScalarEvolution::releaseMemory() {
  7139. // Iterate through all the SCEVUnknown instances and call their
  7140. // destructors, so that they release their references to their values.
  7141. for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
  7142. U->~SCEVUnknown();
  7143. FirstUnknown = nullptr;
  7144. ValueExprMap.clear();
  7145. // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  7146. // that a loop had multiple computable exits.
  7147. for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
  7148. BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
  7149. I != E; ++I) {
  7150. I->second.clear();
  7151. }
  7152. assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  7153. assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  7154. BackedgeTakenCounts.clear();
  7155. ConstantEvolutionLoopExitValue.clear();
  7156. ValuesAtScopes.clear();
  7157. LoopDispositions.clear();
  7158. BlockDispositions.clear();
  7159. UnsignedRanges.clear();
  7160. SignedRanges.clear();
  7161. UniqueSCEVs.clear();
  7162. SCEVAllocator.Reset();
  7163. }
  7164. void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  7165. AU.setPreservesAll();
  7166. AU.addRequired<AssumptionCacheTracker>();
  7167. AU.addRequiredTransitive<LoopInfoWrapperPass>();
  7168. AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  7169. AU.addRequired<TargetLibraryInfoWrapperPass>();
  7170. }
  7171. bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  7172. return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
  7173. }
  7174. static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
  7175. const Loop *L) {
  7176. // Print all inner loops first
  7177. for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
  7178. PrintLoopInfo(OS, SE, *I);
  7179. OS << "Loop ";
  7180. L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  7181. OS << ": ";
  7182. SmallVector<BasicBlock *, 8> ExitBlocks;
  7183. L->getExitBlocks(ExitBlocks);
  7184. if (ExitBlocks.size() != 1)
  7185. OS << "<multiple exits> ";
  7186. if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
  7187. OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  7188. } else {
  7189. OS << "Unpredictable backedge-taken count. ";
  7190. }
  7191. OS << "\n"
  7192. "Loop ";
  7193. L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  7194. OS << ": ";
  7195. if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
  7196. OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  7197. } else {
  7198. OS << "Unpredictable max backedge-taken count. ";
  7199. }
  7200. OS << "\n";
  7201. }
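// The output produced above looks like the following (illustrative values):
//   Loop %for.body: backedge-taken count is (-1 + %n)
//   Loop %for.body: max backedge-taken count is -2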
  7202. void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  7203. // ScalarEvolution's implementation of the print method is to print
  7204. // out SCEV values of all instructions that are interesting. Doing
  7205. // this potentially causes it to create new SCEV objects though,
  7206. // which technically conflicts with the const qualifier. This isn't
  7207. // observable from outside the class though, so casting away the
  7208. // const isn't dangerous.
  7209. ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  7210. OS << "Classifying expressions for: ";
  7211. F->printAsOperand(OS, /*PrintType=*/false);
  7212. OS << "\n";
  7213. for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
  7214. if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
  7215. OS << *I << '\n';
  7216. OS << " --> ";
  7217. const SCEV *SV = SE.getSCEV(&*I);
  7218. SV->print(OS);
  7219. if (!isa<SCEVCouldNotCompute>(SV)) {
  7220. OS << " U: ";
  7221. SE.getUnsignedRange(SV).print(OS);
  7222. OS << " S: ";
  7223. SE.getSignedRange(SV).print(OS);
  7224. }
  7225. const Loop *L = LI->getLoopFor((*I).getParent());
  7226. const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
  7227. if (AtUse != SV) {
  7228. OS << " --> ";
  7229. AtUse->print(OS);
  7230. if (!isa<SCEVCouldNotCompute>(AtUse)) {
  7231. OS << " U: ";
  7232. SE.getUnsignedRange(AtUse).print(OS);
  7233. OS << " S: ";
  7234. SE.getSignedRange(AtUse).print(OS);
  7235. }
  7236. }
  7237. if (L) {
  7238. OS << "\t\t" "Exits: ";
  7239. const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
  7240. if (!SE.isLoopInvariant(ExitValue, L)) {
  7241. OS << "<<Unknown>>";
  7242. } else {
  7243. OS << *ExitValue;
  7244. }
  7245. }
  7246. OS << "\n";
  7247. }
  7248. OS << "Determining loop execution counts for: ";
  7249. F->printAsOperand(OS, /*PrintType=*/false);
  7250. OS << "\n";
  7251. for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
  7252. PrintLoopInfo(OS, &SE, *I);
  7253. }
  7254. ScalarEvolution::LoopDisposition
  7255. ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  7256. auto &Values = LoopDispositions[S];
  7257. for (auto &V : Values) {
  7258. if (V.getPointer() == L)
  7259. return V.getInt();
  7260. }
  7261. Values.emplace_back(L, LoopVariant);
  7262. LoopDisposition D = computeLoopDisposition(S, L);
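// The recursive computeLoopDisposition call may insert new entries into
// LoopDispositions; a DenseMap grow/rehash would invalidate the Values
// reference taken above, so re-find the entry for S before recording D.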
  7263. auto &Values2 = LoopDispositions[S];
  7264. for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
  7265. if (V.getPointer() == L) {
  7266. V.setInt(D);
  7267. break;
  7268. }
  7269. }
  7270. return D;
  7271. }
  7272. ScalarEvolution::LoopDisposition
  7273. ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  7274. switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  7275. case scConstant:
  7276. return LoopInvariant;
  7277. case scTruncate:
  7278. case scZeroExtend:
  7279. case scSignExtend:
  7280. return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  7281. case scAddRecExpr: {
  7282. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  7283. // If L is the addrec's loop, it's computable.
  7284. if (AR->getLoop() == L)
  7285. return LoopComputable;
  7286. // Add recurrences are never invariant in the function-body (null loop).
  7287. if (!L)
  7288. return LoopVariant;
  7289. // This recurrence is variant w.r.t. L if L contains AR's loop.
  7290. if (L->contains(AR->getLoop()))
  7291. return LoopVariant;
  7292. // This recurrence is invariant w.r.t. L if AR's loop contains L.
  7293. if (AR->getLoop()->contains(L))
  7294. return LoopInvariant;
  7295. // This recurrence is variant w.r.t. L if any of its operands
  7296. // are variant.
  7297. for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
  7298. I != E; ++I)
  7299. if (!isLoopInvariant(*I, L))
  7300. return LoopVariant;
  7301. // Otherwise it's loop-invariant.
  7302. return LoopInvariant;
  7303. }
  7304. case scAddExpr:
  7305. case scMulExpr:
  7306. case scUMaxExpr:
  7307. case scSMaxExpr: {
  7308. const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
  7309. bool HasVarying = false;
  7310. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  7311. I != E; ++I) {
  7312. LoopDisposition D = getLoopDisposition(*I, L);
  7313. if (D == LoopVariant)
  7314. return LoopVariant;
  7315. if (D == LoopComputable)
  7316. HasVarying = true;
  7317. }
  7318. return HasVarying ? LoopComputable : LoopInvariant;
  7319. }
  7320. case scUDivExpr: {
  7321. const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
  7322. LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
  7323. if (LD == LoopVariant)
  7324. return LoopVariant;
  7325. LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
  7326. if (RD == LoopVariant)
  7327. return LoopVariant;
  7328. return (LD == LoopInvariant && RD == LoopInvariant) ?
  7329. LoopInvariant : LoopComputable;
  7330. }
  7331. case scUnknown:
  7332. // All non-instruction values are loop invariant. All instructions are loop
  7333. // invariant if they are not contained in the specified loop.
  7334. // Instructions are never considered invariant in the function body
  7335. // (null loop) because they are defined within the "loop".
  7336. if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
  7337. return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
  7338. return LoopInvariant;
  7339. case scCouldNotCompute:
  7340. llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  7341. }
  7342. llvm_unreachable("Unknown SCEV kind!");
  7343. }
  7344. bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  7345. return getLoopDisposition(S, L) == LoopInvariant;
  7346. }
  7347. bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  7348. return getLoopDisposition(S, L) == LoopComputable;
  7349. }
  7350. ScalarEvolution::BlockDisposition
  7351. ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  7352. auto &Values = BlockDispositions[S];
  7353. for (auto &V : Values) {
  7354. if (V.getPointer() == BB)
  7355. return V.getInt();
  7356. }
  7357. Values.emplace_back(BB, DoesNotDominateBlock);
  7358. BlockDisposition D = computeBlockDisposition(S, BB);
  7359. auto &Values2 = BlockDispositions[S];
  7360. for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
  7361. if (V.getPointer() == BB) {
  7362. V.setInt(D);
  7363. break;
  7364. }
  7365. }
  7366. return D;
  7367. }
  7368. ScalarEvolution::BlockDisposition
  7369. ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  7370. switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  7371. case scConstant:
  7372. return ProperlyDominatesBlock;
  7373. case scTruncate:
  7374. case scZeroExtend:
  7375. case scSignExtend:
  7376. return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  7377. case scAddRecExpr: {
7378. // This uses a "dominates" query instead of a "properly dominates" query
7379. // to test for proper dominance too, because the instruction which
  7380. // produces the addrec's value is a PHI, and a PHI effectively properly
  7381. // dominates its entire containing block.
  7382. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  7383. if (!DT->dominates(AR->getLoop()->getHeader(), BB))
  7384. return DoesNotDominateBlock;
  7385. }
  7386. // FALL THROUGH into SCEVNAryExpr handling.
  7387. case scAddExpr:
  7388. case scMulExpr:
  7389. case scUMaxExpr:
  7390. case scSMaxExpr: {
  7391. const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
  7392. bool Proper = true;
  7393. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  7394. I != E; ++I) {
  7395. BlockDisposition D = getBlockDisposition(*I, BB);
  7396. if (D == DoesNotDominateBlock)
  7397. return DoesNotDominateBlock;
  7398. if (D == DominatesBlock)
  7399. Proper = false;
  7400. }
  7401. return Proper ? ProperlyDominatesBlock : DominatesBlock;
  7402. }
  7403. case scUDivExpr: {
  7404. const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
  7405. const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
  7406. BlockDisposition LD = getBlockDisposition(LHS, BB);
  7407. if (LD == DoesNotDominateBlock)
  7408. return DoesNotDominateBlock;
  7409. BlockDisposition RD = getBlockDisposition(RHS, BB);
  7410. if (RD == DoesNotDominateBlock)
  7411. return DoesNotDominateBlock;
  7412. return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
  7413. ProperlyDominatesBlock : DominatesBlock;
  7414. }
  7415. case scUnknown:
  7416. if (Instruction *I =
  7417. dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
  7418. if (I->getParent() == BB)
  7419. return DominatesBlock;
  7420. if (DT->properlyDominates(I->getParent(), BB))
  7421. return ProperlyDominatesBlock;
  7422. return DoesNotDominateBlock;
  7423. }
  7424. return ProperlyDominatesBlock;
  7425. case scCouldNotCompute:
  7426. llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  7427. }
  7428. llvm_unreachable("Unknown SCEV kind!");
  7429. }
  7430. bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  7431. return getBlockDisposition(S, BB) >= DominatesBlock;
  7432. }
  7433. bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  7434. return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
  7435. }
  7436. namespace {
  7437. // Search for a SCEV expression node within an expression tree.
  7438. // Implements SCEVTraversal::Visitor.
  7439. struct SCEVSearch {
  7440. const SCEV *Node;
  7441. bool IsFound;
  7442. SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
  7443. bool follow(const SCEV *S) {
  7444. IsFound |= (S == Node);
  7445. return !IsFound;
  7446. }
  7447. bool isDone() const { return IsFound; }
  7448. };
  7449. }
  7450. bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  7451. SCEVSearch Search(Op);
  7452. visitAll(S, Search);
  7453. return Search.IsFound;
  7454. }
  7455. void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  7456. ValuesAtScopes.erase(S);
  7457. LoopDispositions.erase(S);
  7458. BlockDispositions.erase(S);
  7459. UnsignedRanges.erase(S);
  7460. SignedRanges.erase(S);
  7461. for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
  7462. BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
  7463. BackedgeTakenInfo &BEInfo = I->second;
  7464. if (BEInfo.hasOperand(S, this)) {
  7465. BEInfo.clear();
  7466. BackedgeTakenCounts.erase(I++);
  7467. }
  7468. else
  7469. ++I;
  7470. }
  7471. }
  7472. typedef DenseMap<const Loop *, std::string> VerifyMap;
  7473. /// replaceSubString - Replaces all occurrences of From in Str with To.
  7474. static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
  7475. size_t Pos = 0;
  7476. while ((Pos = Str.find(From, Pos)) != std::string::npos) {
  7477. Str.replace(Pos, From.size(), To.data(), To.size());
  7478. Pos += To.size();
  7479. }
  7480. }
  7481. /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
  7482. static void
  7483. getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
  7484. for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
  7485. getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
  7486. std::string &S = Map[L];
  7487. if (S.empty()) {
  7488. raw_string_ostream OS(S);
  7489. SE.getBackedgeTakenCount(L)->print(OS);
  7490. // false and 0 are semantically equivalent. This can happen in dead loops.
  7491. replaceSubString(OS.str(), "false", "0");
  7492. // Remove wrap flags, their use in SCEV is highly fragile.
  7493. // FIXME: Remove this when SCEV gets smarter about them.
  7494. replaceSubString(OS.str(), "<nw>", "");
  7495. replaceSubString(OS.str(), "<nsw>", "");
  7496. replaceSubString(OS.str(), "<nuw>", "");
  7497. }
  7498. }
  7499. }
  7500. void ScalarEvolution::verifyAnalysis() const {
  7501. if (!VerifySCEV)
  7502. return;
  7503. ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  7504. // Gather stringified backedge taken counts for all loops using SCEV's caches.
  7505. // FIXME: It would be much better to store actual values instead of strings,
  7506. // but SCEV pointers will change if we drop the caches.
  7507. VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
  7508. for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
  7509. getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
  7510. // Gather stringified backedge taken counts for all loops without using
  7511. // SCEV's caches.
  7512. SE.releaseMemory();
  7513. for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
  7514. getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
  7515. // Now compare whether they're the same with and without caches. This allows
  7516. // verifying that no pass changed the cache.
  7517. assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
  7518. "New loops suddenly appeared!");
  7519. for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
  7520. OldE = BackedgeDumpsOld.end(),
  7521. NewI = BackedgeDumpsNew.begin();
  7522. OldI != OldE; ++OldI, ++NewI) {
  7523. assert(OldI->first == NewI->first && "Loop order changed!");
7524. // Compare the stringified SCEVs. We don't care if an undef backedge-taken
7525. // count changes.
7526. // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. Such a
7527. // change may mean that a pass is buggy or that SCEV has to learn a new
7528. // pattern, but it is usually not harmful.
  7529. if (OldI->second != NewI->second &&
  7530. OldI->second.find("undef") == std::string::npos &&
  7531. NewI->second.find("undef") == std::string::npos &&
  7532. OldI->second != "***COULDNOTCOMPUTE***" &&
  7533. NewI->second != "***COULDNOTCOMPUTE***") {
  7534. dbgs() << "SCEVValidator: SCEV for loop '"
  7535. << OldI->first->getHeader()->getName()
  7536. << "' changed from '" << OldI->second
  7537. << "' to '" << NewI->second << "'!\n";
  7538. std::abort();
  7539. }
  7540. }
  7541. // TODO: Verify more things.
  7542. }