//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <utility>

using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
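
// A value is "valid" for a type when converting it to that type's semantics
// loses no information; for example, 1.0E100 is representable as a double but
// overflows when converted to f32, so it is not valid for MVT::f32.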
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
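  // For example, an all-ones <4 x i8> whose elements were promoted may be
  // built from i32 constants of 0x000000FF; checking an element width's worth
  // of low bits (countTrailingOnes() >= 8 here) still recognizes it.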
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}
/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantFPSDNode or undef.
bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}
/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
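/// For example, (BUILD_VECTOR %x, undef, undef, undef) qualifies;
/// single-operand BUILD_VECTORs are rejected.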
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}
/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (Op.getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
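  // In the ISD::CondCode encoding, E is bit 0, G is bit 1, L is bit 2, U is
  // bit 3, and the N ("integer") bit is bit 4; e.g. swapping SETLT turns its
  // L bit into the G bit, yielding SETGT.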
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
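/// For example, the inverse of SETEQ is SETNE, and the inverse of SETOLT
/// (ordered less-than) is SETUGE (unordered greater-or-equal).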
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}
/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
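/// For example, ORing SETLT with SETGT sets both the L and G bits, which
/// yields SETNE.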
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
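/// For example, ANDing SETLE with SETGE leaves only the E bit set, which
/// yields SETEQ.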
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// Add logical or fast math flag values to FoldingSetNodeID value.
static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
                           const SDNodeFlags *Flags) {
  if (!Flags || !isBinOpWithFlags(Opcode))
    return;

  unsigned RawFlags = Flags->getRawFlags();
  // If no flags are set, do not alter the ID. We must match the ID of nodes
  // that were created without explicitly specifying flags. This also saves time
  // and allows a gradual increase in API usage of the optional optimization
  // flags.
  if (RawFlags != 0)
    ID.AddInteger(RawFlags);
}

static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
  if (auto *Node = dyn_cast<BinaryWithFlagsSDNode>(N))
    AddNodeIDFlags(ID, Node->getOpcode(), &Node->Flags);
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  AddNodeIDFlags(ID, N);

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}
/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
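/// The layout, from low bits to high: extension/truncation type in bits 0-1,
/// indexing mode in bits 2-4, volatile in bit 5, non-temporal in bit 6, and
/// invariant in bit 7.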
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}
void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG
/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
}
  679. /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
  680. /// correspond to it. This is useful when we're about to delete or repurpose
  681. /// the node. We don't want future request for structurally identical nodes
  682. /// to return N anymore.
  683. bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  684. bool Erased = false;
  685. switch (N->getOpcode()) {
  686. case ISD::HANDLENODE: return false; // noop.
  687. case ISD::CONDCODE:
  688. assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
  689. "Cond code doesn't exist!");
  690. Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
  691. CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
  692. break;
  693. case ISD::ExternalSymbol:
  694. Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
  695. break;
  696. case ISD::TargetExternalSymbol: {
  697. ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
  698. Erased = TargetExternalSymbols.erase(
  699. std::pair<std::string,unsigned char>(ESN->getSymbol(),
  700. ESN->getTargetFlags()));
  701. break;
  702. }
  703. case ISD::MCSymbol: {
  704. auto *MCSN = cast<MCSymbolSDNode>(N);
  705. Erased = MCSymbols.erase(MCSN->getMCSymbol());
  706. break;
  707. }
  708. case ISD::VALUETYPE: {
  709. EVT VT = cast<VTSDNode>(N)->getVT();
  710. if (VT.isExtended()) {
  711. Erased = ExtendedValueTypeNodes.erase(VT);
  712. } else {
  713. Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
  714. ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
  715. }
  716. break;
  717. }
  718. default:
  719. // Remove it from the CSE Map.
  720. assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
  721. assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
  722. Erased = CSEMap.RemoveNode(N);
  723. break;
  724. }
  725. #ifndef NDEBUG
  726. // Verify that the node was actually in one of the CSE maps, unless it has a
  727. // flag result (which cannot be CSE'd) or is one of the special cases that are
  728. // not subject to CSE.
  729. if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
  730. !N->isMachineOpcode() && !doNotCSE(N)) {
  731. N->dump(this);
  732. dbgs() << "\n";
  733. llvm_unreachable("Node is not in map!");
  734. }
  735. #endif
  736. return Erased;
  737. }
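
// Illustrative note (not part of the original source): RemoveNodeFromCSEMaps
// and AddModifiedNodeToCSEMaps below are used as a pair when a node is
// morphed in place.  A caller is expected to do roughly:
//
//   RemoveNodeFromCSEMaps(N);     // N is about to change structurally.
//   // ... mutate N's operands/opcode in place ...
//   AddModifiedNodeToCSEMaps(N);  // Re-insert, or merge into a duplicate.
//
// If a structurally identical node already exists, AddModifiedNodeToCSEMaps
// transfers N's users to it and deletes N, so N must not be used afterwards.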

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place.  Add it back to the CSE maps, unless an
/// identical node already exists, in which case transfer all its users to the
/// existing node.  This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one.  This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead.  Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
  return Node;
}

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}
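
// Illustrative sketch (not in the original source): on a typical 64-bit
// target, getEVTAlignment(MVT::i64) maps i64 to the IR type `i64` and returns
// its ABI alignment from the DataLayout (usually 8).  MVT::iPTR has no direct
// IR equivalent, so it is modeled as `i8*` before the same lookup.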

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
      UpdateListeners(nullptr) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf) {
  MF = &mf;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
                                            SDVTList VTs, SDValue N1,
                                            SDValue N2,
                                            const SDNodeFlags *Flags) {
  if (isBinOpWithFlags(Opcode)) {
    // If no flags were passed in, use a default flags object.
    SDNodeFlags F;
    if (Flags == nullptr)
      Flags = &F;

    BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
        Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, *Flags);

    return FN;
  }

  BinarySDNode *N = new (NodeAllocator)
      BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location.  Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          DebugLoc DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break; // Process only regular (non-target) constant nodes.
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase the debug location from the node if the node is used at several
      // different places, so that one location is not propagated to all uses,
      // as that leads to incorrect debug info.
      if (N->getDebugLoc() != DL)
        N->setDebugLoc(DebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
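
// Illustrative sketch (not in the original source) of the ext-or-trunc
// helpers above, assuming a SelectionDAG &DAG, an i8 value X and an i32
// value Y:
//
//   SDValue A = DAG.getZExtOrTrunc(X, DL, MVT::i32); // ISD::ZERO_EXTEND
//   SDValue B = DAG.getZExtOrTrunc(Y, DL, MVT::i8);  // ISD::TRUNCATE
//   SDValue C = DAG.getZExtOrTrunc(Y, DL, MVT::i32); // folded back to Y,
//       // since getNode treats a same-type TRUNCATE as a no-op.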

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}
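
// Worked example (illustrative, not in the original source): for an i32
// operand Op and VT = MVT::i8, getZeroExtendInReg computes
// Imm = getLowBitsSet(32, 8) = 0xFF and returns (and Op, 0xFF), i.e. Op with
// everything above the low 8 bits cleared.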

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
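
// Illustrative sketch (not in the original source): for an i32 value V,
// getNOT(DL, V, MVT::i32) returns (xor V, -1).  For a vector VT the all-ones
// constant is built per scalar element and getConstant splats it across the
// lanes, so the same XOR form works for vectors.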

SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isT,
                                  bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isT,
                                  bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
                                  bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32.  In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size.  If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order.  If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // from the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation).  However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     Ops));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, DL.getDebugLoc(),
                                           EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}
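
// Illustrative sketch (not in the original source) of the expansion path
// above: with NewNodesMustHaveLegalTypes set on a 32-bit target where i64
// expands to i32 (the MIPS32 case mentioned in the comments),
// getConstant(C, DL, MVT::v2i64) splits each 64-bit element into two 32-bit
// parts, builds a BUILD_VECTOR of type v4i32 from the (endian-corrected)
// parts, and returns (bitcast v2i64 (build_vector v4i32 ...)).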

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, SDLoc DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, SDLoc DL, EVT VT,
                                    bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, DL.getDebugLoc(),
                                             EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, SDLoc DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = new (NodeAllocator) MCSymbolSDNode(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
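
// Worked example (illustrative, not in the original source): for 4-element
// vectors, mask indices 0..3 select from N1 and 4..7 select from N2.  After
// commuteShuffle(N1, N2, M) with M = <0, 5, 2, 7>, the operands are swapped
// and M becomes <4, 1, 6, 3>, which selects exactly the same lanes.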

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead.  We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < (int)NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + (int)NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all indices into lhs, -> shuffle lhs, undef
  // Canonicalize all indices into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.getOpcode() == ISD::UNDEF;
  // Re-check whether both sides ended up undef.
  if (N1.getOpcode() == ISD::UNDEF && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts.  We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.getOpcode() == ISD::UNDEF)
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SmallVector<SDValue, 8> Ops(NElts, Splatted);

        EVT BuildVT = BV->getValueType(0);
        SDValue NewBV = getNode(ISD::BUILD_VECTOR, dl, BuildVT, Ops);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
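
// Illustrative sketch (not in the original source) of the canonicalizations
// above: getVectorShuffle(v4i32, dl, X, X, <0, 1, 6, 7>) first rewrites the
// shuffle of X with itself as (shuffle X, undef, <0, 1, 2, 3>); that mask is
// the identity, so the call simply returns X without creating a node.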

SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  MVT VT = SV.getSimpleValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]);
}

SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, Code);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      dl.getDebugLoc(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}
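
// Illustrative sketch (not in the original source): assuming a hypothetical
// target whose shift amount type for i64 is i8, getShiftAmountOperand
// (MVT::i64, Amt) truncates an i32 Amt to i8, zero-extends an i1 Amt to i8,
// and returns a vector or already-i8 Amt unchanged.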

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  unsigned StackAlign =
      std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
}
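
// Illustrative sketch (not in the original source): CreateStackTemporary
// (MVT::f64) asks the frame info for an 8-byte slot (the store size of f64),
// aligned to the larger of minAlign and the DataLayout's preferred alignment
// of `double`, and returns the slot as a FrameIndex node of the target's
// pointer type.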

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  unsigned Align =
      std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, dl, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    TargetLowering::BooleanContent Cnt =
        TLI->getBooleanContents(N1->getValueType(0));
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
        VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, dl, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, dl, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), dl, VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), dl, VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), dl, VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), dl, VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, dl, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, dl, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, dl, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, dl, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, dl, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, dl, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, dl, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, dl, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}
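
// Worked example (illustrative, not in the original source): with integer
// constants N1 = 3 and N2 = 5, FoldSetCC(VT, N1, N2, ISD::SETULT, dl) takes
// the constant-constant path and returns getConstant(C1.ult(C2), dl, VT),
// i.e. getConstant(1, dl, VT); no SETCC node is created.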

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  computeKnownBits(Op, KnownZero, KnownOne, Depth);
  return (KnownZero & Mask) == Mask;
}
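
// Illustrative sketch (not in the original source): if Op is
// (zero_extend i8 X) to i32, computeKnownBits reports the top 24 bits as
// known zero, so MaskedValueIsZero(Op, 0xFFFFFF00) returns true while
// MaskedValueIsZero(Op, 0xFF) in general does not.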

/// Determine which bits of Op are known to be either zero or one and return
/// them in the KnownZero/KnownOne bitsets.
void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
                                    APInt &KnownOne, unsigned Depth) const {
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    break;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR: {
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
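  // Worked example for the ISD::MUL case above (illustrative, not in the
  // original source): multiplying a value with 2 known trailing zero bits by
  // one with 3 known trailing zero bits gives TrailZ = 5, so at least the low
  // 5 bits of the product are reported as known zero (capped at BitWidth).
  // The leading-zero estimate is the analogous conservative sum.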
  1833. case ISD::UDIV: {
  1834. // For the purposes of computing leading zeros we can conservatively
  1835. // treat a udiv as a logical right shift by the power of 2 known to
  1836. // be less than the denominator.
  1837. computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
  1838. unsigned LeadZ = KnownZero2.countLeadingOnes();
  1839. KnownOne2.clearAllBits();
  1840. KnownZero2.clearAllBits();
  1841. computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
  1842. unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
  1843. if (RHSUnknownLeadingOnes != BitWidth)
  1844. LeadZ = std::min(BitWidth,
  1845. LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
  1846. KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
  1847. break;
  1848. }
  1849. case ISD::SELECT:
  1850. computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
  1851. computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
  1852. // Only known if known in both the LHS and RHS.
  1853. KnownOne &= KnownOne2;
  1854. KnownZero &= KnownZero2;
  1855. break;
  1856. case ISD::SELECT_CC:
  1857. computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
  1858. computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
  1859. // Only known if known in both the LHS and RHS.
  1860. KnownOne &= KnownOne2;
  1861. KnownZero &= KnownZero2;
  1862. break;
  1863. case ISD::SADDO:
  1864. case ISD::UADDO:
  1865. case ISD::SSUBO:
  1866. case ISD::USUBO:
  1867. case ISD::SMULO:
  1868. case ISD::UMULO:
  1869. if (Op.getResNo() != 1)
  1870. break;
  1871. // The boolean result conforms to getBooleanContents.
  1872. // If we know the result of a setcc has the top bits zero, use this info.
  1873. // We know that we have an integer-based boolean since these operations
  1874. // are only available for integer.
  1875. if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
  1876. TargetLowering::ZeroOrOneBooleanContent &&
  1877. BitWidth > 1)
  1878. KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
  1879. break;
  1880. case ISD::SETCC:
  1881. // If we know the result of a setcc has the top bits zero, use this info.
  1882. if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
  1883. TargetLowering::ZeroOrOneBooleanContent &&
  1884. BitWidth > 1)
  1885. KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
  1886. break;
  1887. case ISD::SHL:
  1888. // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
  1889. if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  1890. unsigned ShAmt = SA->getZExtValue();
  1891. // If the shift count is an invalid immediate, don't do anything.
  1892. if (ShAmt >= BitWidth)
  1893. break;
  1894. computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
  1895. KnownZero <<= ShAmt;
  1896. KnownOne <<= ShAmt;
  1897. // low bits known zero.
  1898. KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
  1899. }
  1900. break;
  1901. case ISD::SRL:
  1902. // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
  1903. if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  1904. unsigned ShAmt = SA->getZExtValue();
  1905. // If the shift count is an invalid immediate, don't do anything.
  1906. if (ShAmt >= BitWidth)
  1907. break;
  1908. computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
  1909. KnownZero = KnownZero.lshr(ShAmt);
  1910. KnownOne = KnownOne.lshr(ShAmt);
  1911. APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
  1912. KnownZero |= HighBits; // High bits known zero.
  1913. }
  1914. break;
  1915. case ISD::SRA:
  1916. if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
  1917. unsigned ShAmt = SA->getZExtValue();
  1918. // If the shift count is an invalid immediate, don't do anything.
  1919. if (ShAmt >= BitWidth)
  1920. break;
  1921. // If any of the demanded bits are produced by the sign extension, we also
  1922. // demand the input sign bit.
  1923. APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
  1924. computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
  1925. KnownZero = KnownZero.lshr(ShAmt);
  1926. KnownOne = KnownOne.lshr(ShAmt);
  1927. // Handle the sign bits.
  1928. APInt SignBit = APInt::getSignBit(BitWidth);
  1929. SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
  1930. if (KnownZero.intersects(SignBit)) {
  1931. KnownZero |= HighBits; // New bits are known zero.
  1932. } else if (KnownOne.intersects(SignBit)) {
  1933. KnownOne |= HighBits; // New bits are known one.
  1934. }
  1935. }
  1936. break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarType().getSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {        // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                      // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
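  // Worked example for SIGN_EXTEND_INREG above (illustrative): extending
  // from i8 inside an i32 whose operand has bit 7 known one, NewBits is
  // 0xFFFFFF00 and InSignBit is 0x00000080; the middle branch fires, so
  // KnownOne gains all 24 high bits.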
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      computeKnownBitsFromRangeMetadata(*Ranges, KnownZero);
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

    // Note if the sign bit is known to be zero or one.
    bool SignBitKnownZero = KnownZero.isNegative();
    bool SignBitKnownOne  = KnownOne.isNegative();

    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero or one, the top bits match.
    if (SignBitKnownZero)
      KnownZero |= NewBits;
    else if (SignBitKnownOne)
      KnownOne  |= NewBits;
    break;
  }
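  // Worked example for SIGN_EXTEND above (illustrative): extending an i8
  // operand whose sign bit is known clear (KnownZero = 0x80) to i32 sets
  // SignBitKnownZero, so KnownZero gains NewBits = 0xFFFFFF00 -- the
  // result is known non-negative and small.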
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne = KnownOne.zext(InBits);
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::SUB: {
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }
  // fall through
  case ISD::ADD:
  case ISD::ADDE: {
    // Output low known-0 bits are known if they are clear in both the LHS
    // and the RHS.  For example, 8+(X<<3) is known to have the low 3 bits
    // clear.
    // Output high known-0 bits are also known if the top bits of each input
    // are known to be clear.  For example, if one input has the top 10 bits
    // clear and the other has the top 8 bits clear, we know the top 7 bits
    // of the output must be clear.
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
    unsigned KnownZeroLow = KnownZero2.countTrailingOnes();

    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    KnownZeroHigh = std::min(KnownZeroHigh,
                             KnownZero2.countLeadingOnes());
    KnownZeroLow = std::min(KnownZeroLow,
                            KnownZero2.countTrailingOnes());

    if (Op.getOpcode() == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
      if (KnownZeroHigh > 1)
        KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
      break;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear.
    // We then tell the caller that the low bit is unknown but that other
    // bits are known zero.
    if (KnownZeroLow >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
    break;
  }
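  // Worked example for the ADD case above (illustrative): for
  // "8 + (X<<3)" both operands have the low 3 bits known zero, so
  // KnownZeroLow = 3 and the result's low 3 bits are known zero; a carry
  // into those positions is impossible because no ones exist below bit 3.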
  case ISD::SREM:
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
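  // Worked example for SREM above (illustrative): "X srem 8" with X known
  // non-negative has LowBits = 7; the low 3 bits pass through from X and
  // all upper bits become known zero, since the result lies in [0, 7].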
  case ISD::UREM: {
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1);

        // The upper bits are all zero, the lower ones are unchanged.
        KnownZero = KnownZero2 | ~LowBits;
        KnownOne = KnownOne2 & LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }
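  // Worked example for UREM above (illustrative): "X urem 16" masks X with
  // LowBits = 15, so everything above bit 3 is known zero; in the general
  // path, "X urem Y" with X's top 8 bits known zero keeps those 8 bits
  // known zero in the result, because the result never exceeds X.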
  case ISD::EXTRACT_ELEMENT: {
    computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    const unsigned Index =
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    const unsigned BitWidth = Op.getValueType().getSizeInBits();

    // Remove low part of known bits mask
    KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
    KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);

    // Remove high part of known bit mask
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX: {
    APInt Op0Zero, Op0One;
    APInt Op1Zero, Op1One;
    computeKnownBits(Op.getOperand(0), Op0Zero, Op0One, Depth+1);
    computeKnownBits(Op.getOperand(1), Op1Zero, Op1One, Depth+1);

    // Only the bits that agree in both operands are known in the result.
    KnownZero = Op0Zero & Op1Zero;
    KnownOne = Op0One & Op1One;
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
      break;
    }
    break;

  default:
    if (Op.getOpcode() < ISD::BUILTIN_OP_END)
      break;
    // Fallthrough
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    break;
  }

  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits.  We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information.  For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && "Invalid VT!");
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (Depth == 6)
    return 1;  // Limit search depth.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::Constant: {
    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
    return Val.getNumSignBits();
  }

  case ISD::SIGN_EXTEND:
    Tmp =
      VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp =
      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);

  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C   -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getZExtValue() >= VTBits ||   // Bad shift.
          C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:  // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (Tmp == 1)
      return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If booleans are represented as 0/-1, all bits are sign bits.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getZExtValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by VTBits-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left.  This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
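  // Worked example for ROTL/ROTR above (illustrative): rotl(sext i8 to
  // i32, 1) starts with 25 known sign bits; rotating left by 1 moves one
  // of them to bit 0, leaving 24 bits at the top that are still copies of
  // the sign bit.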
  case ISD::ADD:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero, KnownOne;
        computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
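  // Worked example for the NEG special case above (illustrative): for
  // "sub 0, X" where X is an i32 known to be 0 or 1, KnownZero is
  // 0xFFFFFFFE, so the result is 0 or -1 and all 32 bits are sign bits.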
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  case ISD::EXTRACT_ELEMENT: {
    const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    const int BitWidth = Op.getValueType().getSizeInBits();
    const int Items =
      Op.getOperand(0).getValueType().getSizeInBits() / BitWidth;

    // Get the reverse index (starting from 1); the Op1 value indexes
    // elements from the little end, while the sign sits at the big end.
    const int rIndex = Items - 1 -
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

    // If the sign portion ends in our element, the subtraction gives the
    // correct result. Otherwise it gives either a negative or a > bitwidth
    // result, which the max/min below clamps.
    return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
  }
  }

  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. EXTLOAD case will fallthrough.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
      default: break;
      case ISD::SEXTLOAD:  // e.g. sextload i16 into i32: 17 sign bits known.
        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
        return VTBits-Tmp+1;
      case ISD::ZEXTLOAD:  // e.g. zextload i16 into i32: 16 sign bits known.
        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
        return VTBits-Tmp;
      }
    }
  }
  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero, KnownOne;
  computeKnownBits(Op, KnownZero, KnownOne, Depth);

  APInt Mask;
  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros.  We use 'min' here in case Val was zero before
  // shifting.  We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD.  This handles the equivalence:
///     X|Cst == X+Cst iff X&Cst = 0.
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                    cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;

  return true;
}
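// Illustrative use of the OR/ADD equivalence above: "(or X, 7)" counts as a
// base with constant offset 7 only when the low 3 bits of X are known zero
// (e.g. X = Y << 3), because then no bits overlap and the OR behaves
// exactly like an ADD.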
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->getValueAPF().isNaN();

  // TODO: Recognize more cases here.

  return false;
}

bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  // If the value is a constant, we can obviously see if it is a zero or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}
bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Account for negative zero comparing equal to positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}
/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), None);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand. Even
  // opaque constants will be folded, because the folding of unary operations
  // doesn't create new constants with different values. Nevertheless, the
  // opaque flag is preserved during folding to prevent future folding with
  // other constants.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, DL, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
        return getConstantFP(APFloat(APFloat::IEEEhalf, Val), DL, VT);
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), DL, VT);
      if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), DL, VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
                         C->isOpaque());
    }
  }
  // Constant fold unary operations with a floating point constant operand.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, DL, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, DL, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, DL, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, DL, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, DL, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, DL, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      integerPart x[2];
      bool ignored;
      static_assert(integerPartWidth >= 64, "APFloat parts too small!");
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
                                               Opcode==ISD::FP_TO_SINT,
                                               APFloat::rmTowardZero, &ignored);
      if (s==APFloat::opInvalidOp)  // inexact is OK, in fact usual
        break;
      APInt api(VT.getSizeInBits(), x);
      return getConstant(api, DL, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
        return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
      if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
      break;
    }
  }
  // Constant fold unary operations with a vector integer or float operand.
  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
    if (BV->isConstant()) {
      switch (Opcode) {
      default:
        // FIXME: Entirely reasonable to perform folding of other unary
        // operations here as the need arises.
        break;
      case ISD::FNEG:
      case ISD::FABS:
      case ISD::FCEIL:
      case ISD::FTRUNC:
      case ISD::FFLOOR:
      case ISD::FP_EXTEND:
      case ISD::FP_TO_SINT:
      case ISD::FP_TO_UINT:
      case ISD::TRUNCATE:
      case ISD::UINT_TO_FP:
      case ISD::SINT_TO_FP:
      case ISD::BSWAP:
      case ISD::CTLZ:
      case ISD::CTLZ_ZERO_UNDEF:
      case ISD::CTTZ:
      case ISD::CTTZ_ZERO_UNDEF:
      case ISD::CTPOP: {
        EVT SVT = VT.getScalarType();
        EVT InVT = BV->getValueType(0);
        EVT InSVT = InVT.getScalarType();

        // Find a legal integer scalar type for constant promotion and
        // ensure that its scalar size is at least as large as the source.
        EVT LegalSVT = SVT;
        if (SVT.isInteger()) {
          LegalSVT = TLI->getTypeToTransformTo(*getContext(), SVT);
          if (LegalSVT.bitsLT(SVT)) break;
        }

        // Let the above scalar folding handle the folding of each element.
        SmallVector<SDValue, 8> Ops;
        for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
          SDValue OpN = BV->getOperand(i);
          EVT OpVT = OpN.getValueType();

          // Build vector (integer) scalar operands may need implicit
          // truncation - do this before constant folding.
          if (OpVT.isInteger() && OpVT.bitsGT(InSVT))
            OpN = getNode(ISD::TRUNCATE, DL, InSVT, OpN);

          OpN = getNode(Opcode, DL, SVT, OpN);

          // Legalize the (integer) scalar constant if necessary.
          if (LegalSVT != SVT)
            OpN = getNode(ISD::ANY_EXTEND, DL, LegalSVT, OpN);

          if (OpN.getOpcode() != ISD::UNDEF &&
              OpN.getOpcode() != ISD::Constant &&
              OpN.getOpcode() != ISD::ConstantFP)
            break;
          Ops.push_back(OpN);
        }

        if (Ops.size() == VT.getVectorNumElements())
          return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
        break;
      }
      }
    }
  }
  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;  // Factor, merge or concat of one node?  No need.
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;  // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, DL, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;  // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)  // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, DL, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;  // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;  // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
      if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
      return Operand.getNode()->getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BSWAP:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid BSWAP!");
    assert((VT.getScalarSizeInBits() % 16 == 0) &&
           "BSWAP types must be a multiple of 16 bits!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() &&
           "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
    if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
                     Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getNode()->getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
    break;
  }
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {  // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    SDValue Ops[1] = { Operand };
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
  }

  InsertNode(N);
  return SDValue(N, 0);
}
static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
                                        const APInt &C2) {
  switch (Opcode) {
  case ISD::ADD:  return std::make_pair(C1 + C2, true);
  case ISD::SUB:  return std::make_pair(C1 - C2, true);
  case ISD::MUL:  return std::make_pair(C1 * C2, true);
  case ISD::AND:  return std::make_pair(C1 & C2, true);
  case ISD::OR:   return std::make_pair(C1 | C2, true);
  case ISD::XOR:  return std::make_pair(C1 ^ C2, true);
  case ISD::SHL:  return std::make_pair(C1 << C2, true);
  case ISD::SRL:  return std::make_pair(C1.lshr(C2), true);
  case ISD::SRA:  return std::make_pair(C1.ashr(C2), true);
  case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
  case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
  case ISD::UDIV:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.udiv(C2), true);
  case ISD::UREM:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.urem(C2), true);
  case ISD::SDIV:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.sdiv(C2), true);
  case ISD::SREM:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.srem(C2), true);
  }
  return std::make_pair(APInt(1, 0), false);
}
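// Note on FoldValue above (illustrative): the bool in the returned pair
// distinguishes "folded successfully" from "cannot fold"; e.g. a UDIV or
// UREM by a zero constant returns {APInt(1, 0), false} so the caller keeps
// the original node instead of folding a division by zero.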
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
                                             const ConstantSDNode *Cst1,
                                             const ConstantSDNode *Cst2) {
  if (Cst1->isOpaque() || Cst2->isOpaque())
    return SDValue();

  std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
                                            Cst2->getAPIntValue());
  if (!Folded.second)
    return SDValue();
  return getConstant(Folded.first, DL, VT);
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
                                             SDNode *Cst1, SDNode *Cst2) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  // Handle the case of two scalars.
  if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
    if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
      if (SDValue Folded =
              FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2)) {
        if (!VT.isVector())
          return Folded;
        // We may have a vector type but a scalar result. Create a splat of
        // the folded scalar (splatting from the previously empty Outputs
        // would read past the end of the vector).
        SmallVector<SDValue, 4> Outputs(VT.getVectorNumElements(), Folded);
        // Build a big vector out of the scalar elements we generated.
        return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
      }
      return SDValue();
    }
  }
  // For vectors extract each constant element into Inputs so we can constant
  // fold them individually.
  BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
  BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
  if (!BV1 || !BV2)
    return SDValue();

  assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

  EVT SVT = VT.getScalarType();
  SmallVector<SDValue, 4> Outputs;
  for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
    ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
    ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
    if (!V1 || !V2)  // Not a constant, bail.
      return SDValue();

    if (V1->isOpaque() || V2->isOpaque())
      return SDValue();

    // Avoid BUILD_VECTOR nodes that perform implicit truncation.
    // FIXME: This is valid and could be handled by truncating the APInts.
    if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
      return SDValue();

    // Fold one vector element.
    std::pair<APInt, bool> Folded = FoldValue(Opcode, V1->getAPIntValue(),
                                              V2->getAPIntValue());
    if (!Folded.second)
      return SDValue();
    Outputs.push_back(getConstant(Folded.first, DL, SVT));
  }

  assert(VT.getVectorNumElements() == Outputs.size() &&
         "Vector size mismatch!");

  // We may have a vector type but a scalar result. Create a splat.
  Outputs.resize(VT.getVectorNumElements(), Outputs.back());

  // Build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2, const SDNodeFlags *Flags) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());

      // BUILD_VECTOR requires all inputs to be of the same type, find the
      // maximum type and extend them all.
      EVT SVT = VT.getScalarType();
      for (SDValue Op : Elts)
        SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
      if (SVT.bitsGT(VT.getScalarType()))
        for (SDValue &Op : Elts)
          Op = TLI->isZExtFree(Op.getValueType(), SVT)
             ? getZExtOrTrunc(Op, DL, SVT)
             : getSExtOrTrunc(Op, DL, SVT);

      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
    // worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP,0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:  // N1 and result must match.  N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must be in the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts.  This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
    assert(N2.getValueType().getSizeInBits() >=
               Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them.  Since we know the size of the shift has to be less than
    // the size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (VT == EVT) return N1;  // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending

    auto SignExtendInReg = [&](APInt Val) {
      unsigned FromBits = EVT.getScalarType().getSizeInBits();
      Val <<= Val.getBitWidth() - FromBits;
      Val = Val.ashr(Val.getBitWidth() - FromBits);
      return getConstant(Val, DL, VT.getScalarType());
    };
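    // Worked example for SignExtendInReg above (illustrative): for an i32
    // Val = 0x00000080 with FromBits = 8, shifting left by 24 gives
    // 0x80000000 and the arithmetic shift right by 24 gives 0xFFFFFF80,
    // i.e. the value sign-extended from bit 7.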
    if (N1C) {
      APInt Val = N1C->getAPIntValue();
      return SignExtendInReg(Val);
    }
    if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
      SmallVector<SDValue, 8> Ops;
      for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
        SDValue Op = N1.getOperand(i);
        if (Op.getValueType() != VT.getScalarType()) break;
        if (Op.getOpcode() == ISD::UNDEF) {
          Ops.push_back(Op);
          continue;
        }
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
          APInt Val = C->getAPIntValue();
          Ops.push_back(SignExtendInReg(Val));
          continue;
        }
        break;
      }
      if (Ops.size() == VT.getVectorNumElements())
        return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
    if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
        N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor, DL,
                                 N2.getValueType()));
    }

    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element else
      // if the indices are known different, extract the element from
      // the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
    // 64-bit integers into 32-bit parts.  Instead of building the extract of
    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
    }
    break;
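  // Worked example for the constant EXTRACT_ELEMENT above (illustrative):
  // extracting i32 element 1 of the i64 constant 0x123456789ABCDEF0 shifts
  // right by 32 and truncates, yielding the constant 0x12345678.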
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Index = N2;
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (isa<ConstantSDNode>(Index)) {
        assert((VT.getVectorNumElements() +
                cast<ConstantSDNode>(Index)->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;
    }
    break;
  }
  3237. }

  // Perform trivial constant folding.
  if (SDValue SV =
          FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
    return SV;

  // Canonicalize constant to RHS if commutative.
  if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
    std::swap(N1C, N2C);
    std::swap(N1, N2);
  }

  // Constant fold FP operations.
  bool HasFPExceptions = TLI->hasFloatingPointExceptions();
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
  if (N1CFP) {
    if (!N2CFP && isCommutativeBinOp(Opcode)) {
      // Canonicalize constant to RHS if commutative.
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    } else if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
                                 s != APFloat::opDivByZero)) {
          return getConstantFP(V1, DL, VT);
        }
        break;
      case ISD::FREM:
        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
                                 s != APFloat::opDivByZero)) {
          return getConstantFP(V1, DL, VT);
        }
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, DL, VT);
      default: break;
      }
    }

    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();    // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, DL, VT);
    }
  }

  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.getOpcode() == ISD::UNDEF) {
    if (isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
      case ISD::FSUB:
      case ISD::FDIV:
      case ISD::FREM:
      case ISD::SRA:
        return N1;     // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SRL:
      case ISD::SHL:
        if (!VT.isVector())
          return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
        // For vectors, we can't easily build an all zero vector, just return
        // the RHS.
        return N2;
      }
    }
  }

  // Fold a bunch of operators when the RHS is undef.
  if (N2.getOpcode() == ISD::UNDEF) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.getOpcode() == ISD::UNDEF)
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, DL, VT);
      // fallthrough
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return N2;       // fold op(arg1, undef) -> undef
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FDIV:
    case ISD::FREM:
      if (getTarget().Options.UnsafeFPMath)
        return N2;
      break;
    case ISD::MUL:
    case ISD::AND:
    case ISD::SRL:
    case ISD::SHL:
      if (!VT.isVector())
        return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
      // For vectors, we can't easily build an all zero vector, just return
      // the LHS.
      return N1;
    case ISD::OR:
      if (!VT.isVector())
        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
      // For vectors, we can't easily build an all one vector, just return
      // the LHS.
      return N1;
    case ISD::SRA:
      return N1;
    }
  }

  // Memoize this node if possible.
  BinarySDNode *N;
  SDVTList VTs = getVTList(VT);
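  // Nodes that produce glue are never CSE'd, since a glue result ties the
  // node to its particular consumer.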
  if (VT != MVT::Glue) {
    SDValue Ops[] = {N1, N2};
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    AddNodeIDFlags(ID, Opcode, Flags);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
      return SDValue(E, 0);

    N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
    CSEMap.InsertNode(N, IP);
  } else {
    N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  // Perform various simplifications.
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  switch (Opcode) {
  case ISD::FMA: {
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      APFloat::opStatus s =
        V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
        return getConstantFP(V1, DL, VT);
    }
    break;
  }
  case ISD::CONCAT_VECTORS:
    // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be simplified
    // to one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR &&
        N3.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::SETCC: {
    // Use FoldSetCC to simplify SETCC's.
    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
    if (Simp.getNode()) return Simp;
    break;
  }
  case ISD::SELECT:
    if (N1C) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors!");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index)) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index)->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2, N3 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops);
}

/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0)
          ArgChains.push_back(SDValue(L, 1));

  // Build a tokenfactor for all the chains.
  return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              SDLoc dl) {
  assert(Value.getOpcode() != ISD::UNDEF);

  unsigned NumBits = VT.getScalarType().getSizeInBits();
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    assert(C->getAPIntValue().getBitWidth() == 8);
    APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
    if (VT.isInteger())
      return DAG.getConstant(Val, dl, VT);
    return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
                             VT);
  }

  assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
  EVT IntVT = VT.getScalarType();
  if (!IntVT.isInteger())
    IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
  if (NumBits > 8) {
    // Use a multiplication with 0x010101... to extend the input to the
    // required length.
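    // For example, multiplying the i32 value 0x000000AB by 0x01010101
    // produces the splat 0xABABABAB.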
    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
    Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
                        DAG.getConstant(Magic, dl, IntVT));
  }

  if (VT != Value.getValueType() && !VT.isInteger())
    Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
  if (VT != Value.getValueType()) {
    assert(VT.getVectorElementType() == Value.getValueType() &&
           "value type should be one vector element here");
    SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
    Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
  }

  return Value;
}

/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
/// when a memcpy is turned into a memset because the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI, StringRef Str) {
  // Handle vector with all elements zero.
  if (Str.empty()) {
    if (VT.isInteger())
      return DAG.getConstant(0, dl, VT);
    else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
      return DAG.getConstantFP(0.0, dl, VT);
    else if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getConstant(0, dl,
                                         EVT::getVectorVT(*DAG.getContext(),
                                                          EltVT, NumElts)));
    } else
      llvm_unreachable("Expected type!");
  }

  assert(!VT.isVector() && "Can't handle vector type here!");
  unsigned NumVTBits = VT.getSizeInBits();
  unsigned NumVTBytes = NumVTBits / 8;
  unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));

  APInt Val(NumVTBits, 0);
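  // Pack the leading bytes of Str into Val in target byte order; e.g. "abcd"
  // read as an i32 is 0x64636261 on a little-endian target and 0x61626364 on
  // a big-endian one.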
  if (DAG.getDataLayout().isLittleEndian()) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Str[i] << i*8;
  } else {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
  }

  // If the "cost" of materializing the integer immediate is less than the cost
  // of a load, then it is cost effective to turn the load into the immediate.
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
    return DAG.getConstant(Val, dl, VT);
  return SDValue(nullptr, 0);
}

/// getMemBasePlusOffset - Returns a node representing Base plus a constant
/// Offset, for addressing successive pieces of a memory operation.
static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
                                    SelectionDAG &DAG) {
  EVT VT = Base.getValueType();
  return DAG.getNode(ISD::ADD, dl,
                     VT, Base, DAG.getConstant(Offset, dl, VT));
}

/// isMemSrcFromString - Returns true if memcpy source is a string constant.
///
static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
  unsigned SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
}

/// Determines the optimal series of memory ops to replace the memset / memcpy.
/// Return true if the number of memory ops is below the threshold (Limit).
/// It returns the types of the sequence of memory ops to perform
/// memset / memcpy by reference.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                     unsigned Limit, uint64_t Size,
                                     unsigned DstAlign, unsigned SrcAlign,
                                     bool IsMemset,
                                     bool ZeroMemset,
                                     bool MemcpyStrSrc,
                                     bool AllowOverlap,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
         "Expecting memcpy / memset source to meet alignment requirement!");
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
                                   IsMemset, ZeroMemset, MemcpyStrSrc,
                                   DAG.getMachineFunction());

  if (VT == MVT::Other) {
    unsigned AS = 0;
    if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) ||
        TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
      VT = TLI.getPointerTy(DAG.getDataLayout());
    } else {
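      // Otherwise pick the widest scalar type the destination alignment
      // supports: DstAlign & 7 == 0 means 8-byte (or better) alignment,
      // 4 means 4-byte alignment, and so on.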
      switch (DstAlign & 7) {
      case 0:  VT = MVT::i64; break;
      case 4:  VT = MVT::i32; break;
      case 2:  VT = MVT::i16; break;
      default: VT = MVT::i8;  break;
      }
    }

    MVT LVT = MVT::i64;
    while (!TLI.isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector load / store's for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            TLI.isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 TLI.isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      // FIXME: Only does this for 64-bit or more since we don't have proper
      // cost model for unaligned load / store.
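      // For example, a 15-byte copy can be issued as two overlapping i64
      // operations at offsets 0 and 7 instead of an i64+i32+i16+i8 sequence.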
      bool Fast;
      unsigned AS = 0;
      if (NumMemOps && AllowOverlap &&
          VTSize >= 8 && NewVTSize < Size &&
          TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
                                       SDValue Chain, SDValue Dst,
                                       SDValue Src, uint64_t Size,
                                       unsigned Align, bool isVol,
                                       bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  StringRef Str;
  bool CopyFromStr = isMemSrcFromString(Src, Str);
  bool isZeroStr = CopyFromStr && Str.empty();
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align),
                                (isZeroStr ? 0 : SrcAlign),
                                false, false, CopyFromStr, true, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Align &&
             DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
        NewAlign /= 2;

    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
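      // Only the final operation may overlap, and only when an earlier
      // operation exists for it to overlap with.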
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromStr &&
        (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constantpool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
      if (Value.getNode())
        Store = DAG.getStore(Chain, dl, Value,
                             getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                             DstPtrInfo.getWithOffset(DstOff), isVol,
                             false, Align);
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
      assert(NVT.bitsGE(VT));
      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             getMemBasePlusOffset(Src, SrcOff, dl, DAG),
                             SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
                             false, MinAlign(SrcAlign, SrcOff));
      Store = DAG.getTruncStore(Chain, dl, Value,
                                getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                                DstPtrInfo.getWithOffset(DstOff), VT, isVol,
                                false, Align);
    }
    OutChains.push_back(Store);
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
                                        SDValue Chain, SDValue Dst,
                                        SDValue Src, uint64_t Size,
                                        unsigned Align, bool isVol,
                                        bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align), SrcAlign,
                                false, false, false, false, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }
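
  // Unlike memcpy, issue all of the loads before any of the stores so the
  // expansion stays correct when the source and destination regions overlap.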
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    Value = DAG.getLoad(VT, dl, Chain,
                        getMemBasePlusOffset(Src, SrcOff, dl, DAG),
                        SrcPtrInfo.getWithOffset(SrcOff), isVol,
                        false, false, SrcAlign);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                         DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

/// \brief Lower the call to 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace 'llvm.memset' intrinsic with several store
/// operations and value calculation code. This is usually profitable for small
/// memory size.
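///
/// For example, a 16-byte memset with value 0xAB on a 64-bit target typically
/// becomes two i64 stores of the splat value 0xABABABABABABABAB.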
static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
                               SDValue Chain, SDValue Dst,
                               SDValue Src, uint64_t Size,
                               unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memset to a series of load/store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
    isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
                                Size, (DstAlignCanChange ? 0 : Align), 0,
                                true, IsZeroVal, false, true, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(Chain, dl, Value,
                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                                 DstPtrInfo.getWithOffset(DstOff),
                                 isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol, bool AlwaysInline,
                                bool isTailCall, MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(),
                                             Align, isVol, false,
                                             DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemcpy(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
                 Type::getVoidTy(*getContext()),
                 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                   TLI->getPointerTy(getDataLayout())),
                 std::move(Args), 0)
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
                                 SDValue Src, SDValue Size,
                                 unsigned Align, bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                               ConstantSize->getZExtValue(), Align, isVol,
                               false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemmove(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                 Type::getVoidTy(*getContext()),
                 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                   TLI->getPointerTy(getDataLayout())),
                 std::move(Args), 0)
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                      Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Emit a library call.
  Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                 Type::getVoidTy(*getContext()),
                 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                   TLI->getPointerTy(getDataLayout())),
                 std::move(Args), 0)
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO,
                                AtomicOrdering SuccessOrdering,
                                AtomicOrdering FailureOrdering,
                                SynchronizationScope SynchScope) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  // Allocate the operands array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the allocator is released.
  // If the number of operands is less than 5 we use AtomicSDNode's internal
  // storage.
  unsigned NumOps = Ops.size();
  SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
                             : nullptr;

  SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, MemVT,
                                               Ops.data(), DynOps, NumOps, MMO,
                                               SuccessOrdering, FailureOrdering,
                                               SynchScope);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomicCmpSwap(
    unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
    SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
    unsigned Alignment, AtomicOrdering SuccessOrdering,
    AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();

  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  unsigned Flags = MachineMemOperand::MOVolatile;
  Flags |= MachineMemOperand::MOLoad;
  Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);

  return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
                          SuccessOrdering, FailureOrdering, SynchScope);
}

SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
                                       SDVTList VTs, SDValue Chain, SDValue Ptr,
                                       SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO,
                                       AtomicOrdering SuccessOrdering,
                                       AtomicOrdering FailureOrdering,
                                       SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
                   SuccessOrdering, FailureOrdering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Val,
                                const Value* PtrVal,
                                unsigned Alignment,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // An atomic store does not load. An atomic load does not store.
  // (An atomicrmw obviously both loads and stores.)
  // For now, atomics are considered to be volatile always, and they are
  // chained as such.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  unsigned Flags = MachineMemOperand::MOVolatile;
  if (Opcode != ISD::ATOMIC_STORE)
    Flags |= MachineMemOperand::MOLoad;
  if (Opcode != ISD::ATOMIC_LOAD)
    Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                            MemVT.getStoreSize(), Alignment);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
                                               getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                EVT VT, SDValue Chain,
                                SDValue Ptr,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
                                  ArrayRef<SDValue> Ops,
                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                  unsigned Align, bool Vol,
                                  bool ReadMem, bool WriteMem, unsigned Size) {
  if (Align == 0)  // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  unsigned Flags = 0;
  if (WriteMem)
    Flags |= MachineMemOperand::MOStore;
  if (ReadMem)
    Flags |= MachineMemOperand::MOLoad;
  if (Vol)
    Flags |= MachineMemOperand::MOVolatile;
  if (!Size)
    Size = MemVT.getStoreSize();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
                                  ArrayRef<SDValue> Ops, EVT MemVT,
                                  MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          (Opcode <= INT_MAX &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, Ops,
                                               MemVT, MMO);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, Ops,
                                               MemVT, MMO);
  }
  InsertNode(N);
  return SDValue(N, 0);
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return MachinePointerInfo();

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(FI, Offset+
                       cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
  // If the 'Offset' value isn't a constant, we can't handle this.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.getOpcode() == ISD::UNDEF)
    return InferPointerInfo(Ptr);
  return MachinePointerInfo();
}

SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                      EVT VT, SDLoc dl, SDValue Chain,
                      SDValue Ptr, SDValue Offset,
                      MachinePointerInfo PtrInfo, EVT MemVT,
                      bool isVolatile, bool isNonTemporal, bool isInvariant,
                      unsigned Alignment, const AAMDNodes &AAInfo,
                      const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(VT);

  unsigned Flags = MachineMemOperand::MOLoad;
  if (isVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (isNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (isInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
                            AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                      EVT VT, SDLoc dl, SDValue Chain,
                      SDValue Ptr, SDValue Offset, EVT MemVT,
                      MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
         "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
                                             dl.getDebugLoc(), VTs, AM, ExtType,
                                             MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachinePointerInfo PtrInfo,
                              bool isVolatile, bool isNonTemporal,
                              bool isInvariant, unsigned Alignment,
                              const AAMDNodes &AAInfo,
                              const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
                 AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 bool isVolatile, bool isNonTemporal,
                                 bool isInvariant, unsigned Alignment,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
                 Alignment, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
         "Load is already an indexed load!");
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
                 false, LD->getAlignment());
}

SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               bool isVolatile, bool isNonTemporal,
                               unsigned Alignment, const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  unsigned Flags = MachineMemOperand::MOStore;
  if (isVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (isNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags,
                            Val.getValueType().getStoreSize(), Alignment,
                            AAInfo);

  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs,
                                              ISD::UNINDEXED, false, VT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
  4558. SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
  4559. SDValue Ptr, MachinePointerInfo PtrInfo,
  4560. EVT SVT,bool isVolatile, bool isNonTemporal,
  4561. unsigned Alignment,
  4562. const AAMDNodes &AAInfo) {
  4563. assert(Chain.getValueType() == MVT::Other &&
  4564. "Invalid chain type");
  4565. if (Alignment == 0) // Ensure that codegen never sees alignment 0
  4566. Alignment = getEVTAlignment(SVT);
  4567. unsigned Flags = MachineMemOperand::MOStore;
  4568. if (isVolatile)
  4569. Flags |= MachineMemOperand::MOVolatile;
  4570. if (isNonTemporal)
  4571. Flags |= MachineMemOperand::MONonTemporal;
  4572. if (PtrInfo.V.isNull())
  4573. PtrInfo = InferPointerInfo(Ptr);
  4574. MachineFunction &MF = getMachineFunction();
  4575. MachineMemOperand *MMO =
  4576. MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
  4577. AAInfo);
  4578. return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
  4579. }
  4580. SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
  4581. SDValue Ptr, EVT SVT,
  4582. MachineMemOperand *MMO) {
  4583. EVT VT = Val.getValueType();
  4584. assert(Chain.getValueType() == MVT::Other &&
  4585. "Invalid chain type");
  4586. if (VT == SVT)
  4587. return getStore(Chain, dl, Val, Ptr, MMO);
  4588. assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
  4589. "Should only be a truncating store, not extending!");
  4590. assert(VT.isInteger() == SVT.isInteger() &&
  4591. "Can't do FP-INT conversion!");
  4592. assert(VT.isVector() == SVT.isVector() &&
  4593. "Cannot use trunc store to convert to or from a vector!");
  4594. assert((!VT.isVector() ||
  4595. VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
  4596. "Cannot use trunc store to change the number of vector elements!");
  4597. SDVTList VTs = getVTList(MVT::Other);
  4598. SDValue Undef = getUNDEF(Ptr.getValueType());
  4599. SDValue Ops[] = { Chain, Val, Ptr, Undef };
  4600. FoldingSetNodeID ID;
  4601. AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  4602. ID.AddInteger(SVT.getRawBits());
  4603. ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
  4604. MMO->isNonTemporal(), MMO->isInvariant()));
  4605. ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  4606. void *IP = nullptr;
  4607. if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
  4608. cast<StoreSDNode>(E)->refineAlignment(MMO);
  4609. return SDValue(E, 0);
  4610. }
  4611. SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
  4612. dl.getDebugLoc(), VTs,
  4613. ISD::UNINDEXED, true, SVT, MMO);
  4614. CSEMap.InsertNode(N, IP);
  4615. InsertNode(N);
  4616. return SDValue(N, 0);
  4617. }

SDValue
SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
                              SDValue Offset, ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
         "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs, AM,
                                              ST->isTruncatingStore(),
                                              ST->getMemoryVT(),
                                              ST->getMemOperand());
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain,
                            SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT,
                            MachineMemOperand *MMO, ISD::LoadExtType ExtTy) {
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtTy, ISD::UNINDEXED,
                                     MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) MaskedLoadSDNode(dl.getIROrder(),
                                                   dl.getDebugLoc(), Ops, 4,
                                                   VTs, ExtTy, MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
                                     SDValue Ptr, SDValue Mask, EVT MemVT,
                                     MachineMemOperand *MMO, bool isTrunc) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Val };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) MaskedStoreSDNode(dl.getIROrder(),
                                                    dl.getDebugLoc(), Ops, 4,
                                                    VTs, isTrunc, MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl,
                              ArrayRef<SDValue> Ops,
                              MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ISD::NON_EXTLOAD, ISD::UNINDEXED,
                                     MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  MaskedGatherSDNode *N =
    new (NodeAllocator) MaskedGatherSDNode(dl.getIROrder(), dl.getDebugLoc(),
                                           Ops, VTs, VT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N =
    new (NodeAllocator) MaskedScatterSDNode(dl.getIROrder(), dl.getDebugLoc(),
                                            Ops, VTs, VT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
                               SDValue Chain, SDValue Ptr,
                               SDValue SV,
                               unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0]);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::SELECT_CC: {
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  }
  case ISD::BR_CC: {
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
                                   VTs, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
                                   VTs, Ops);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a flag.
  SDNode *N;
  unsigned NumOps = Ops.size();
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
      return SDValue(E, 0);

    if (NumOps == 1) {
      N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTList, Ops[0]);
    } else if (NumOps == 2) {
      N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                           DL.getDebugLoc(), VTList, Ops[0],
                                           Ops[1]);
    } else if (NumOps == 3) {
      N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                            DL.getDebugLoc(), VTList, Ops[0],
                                            Ops[1], Ops[2]);
    } else {
      N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
                                     VTList, Ops);
    }
    CSEMap.InsertNode(N, IP);
  } else {
    if (NumOps == 1) {
      N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTList, Ops[0]);
    } else if (NumOps == 2) {
      N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                           DL.getDebugLoc(), VTList, Ops[0],
                                           Ops[1]);
    } else if (NumOps == 3) {
      N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                            DL.getDebugLoc(), VTList, Ops[0],
                                            Ops[1], Ops[2]);
    } else {
      N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
                                     VTList, Ops);
    }
  }
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    std::copy(VTs.begin(), VTs.end(), Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands.  If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists.  If the resultant node does not exist in the DAG, the
/// input node is returned.  As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
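///
/// A minimal illustrative sketch of the CSE-aware contract (the node and
/// operands here are hypothetical, not taken from this file):
/// \code
///   // Give Add a new left operand; keep the right one.
///   SDNode *Result = DAG.UpdateNodeOperands(Add, NewLHS, Add->getOperand(1));
///   if (Result != Add) {
///     // An equivalent node already existed: Add was left unmodified and
///     // callers must switch to Result from here on.
///   }
/// \endcode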
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (Ops.empty() || std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
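/// A minimal illustrative sketch (the machine opcode and operand are
/// hypothetical, for illustration only):
/// \code
///   // Replace N in place with a single-result machine instruction.
///   SDNode *New = CurDAG->SelectNodeTo(N, MyTarget::MOVri, MVT::i32, Imm);
/// \endcode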
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3, EVT VT4,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  N = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  N->setNodeId(-1);
  return N;
}

/// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This makes the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.  Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner or
/// the legalizer which maintain worklists that would need to be updated when
/// deleting things.
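///
/// A minimal illustrative sketch (the opcode and result types here are
/// hypothetical, not taken from this file):
/// \code
///   // Re-purpose N as a two-result node keeping its first operand.
///   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
///   SDValue Ops[] = { N->getOperand(0) };
///   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, VTs, Ops);
///   if (Res != N) {
///     // An equivalent node already existed; use Res instead of N.
///   }
/// \endcode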
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, N->getDebugLoc(), IP))
      return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
    // Initialize the memory references information.
    MN->setMemRefs(nullptr, nullptr);
    // If NumOps is larger than the # of operands we can have in a
    // MachineSDNode, reallocate the operand list.
    if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
      if (MN->OperandsNeedDelete)
        delete[] MN->OperandList;
      if (NumOps > array_lengthof(MN->LocalOperands))
        // We're creating a final node that will live unmorphed for the
        // remainder of the current SelectionDAG iteration, so we can allocate
        // the operands directly out of a pool with no recycling metadata.
        MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
                         Ops.data(), NumOps);
      else
        MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps);
      MN->OperandsNeedDelete = false;
    } else
      MN->InitOperands(MN->OperandList, Ops.data(), NumOps);
  } else {
    // If NumOps is larger than the # of operands we currently have, reallocate
    // the operand list.
    if (NumOps > N->NumOperands) {
      if (N->OperandsNeedDelete)
        delete[] N->OperandList;
      N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps);
      N->OperandsNeedDelete = true;
    } else
      N->InitOperands(N->OperandList, Ops.data(), NumOps);
  }

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
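///
/// A minimal illustrative sketch from a hypothetical target's selector (the
/// opcode and operands are invented for illustration):
/// \code
///   // LHS and RHS are previously selected i32 values.
///   MachineSDNode *Add =
///       DAG.getMachineNode(MyTarget::ADDrr, DL, MVT::i32, LHS, RHS);
///   // Issuing the same call again returns the existing node, not a copy.
/// \endcode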
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
                             SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
                             SDValue Op1, SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1,
                             SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             SDValue Op1, SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
                             EVT VT2, EVT VT3, EVT VT4,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             ArrayRef<EVT> ResultTys,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
                             ArrayRef<SDValue> OpsArray) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;
  const SDValue *Ops = OpsArray.data();
  unsigned NumOps = OpsArray.size();

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, OpsArray);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) {
      return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs);

  // Initialize the operands list.
  if (NumOps > array_lengthof(N->LocalOperands))
    // We're creating a final node that will live unmorphed for the
    // remainder of the current SelectionDAG iteration, so we can allocate
    // the operands directly out of a pool with no recycling metadata.
    N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
                    Ops, NumOps);
  else
    N->InitOperands(N->LocalOperands, Ops, NumOps);
  N->OperandsNeedDelete = false;

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return N;
}

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
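///
/// For example (hypothetical sub-register index, for illustration only):
/// \code
///   // Extract the low 32 bits of a 64-bit value as a sub-register.
///   SDValue Lo = DAG.getTargetExtractSubreg(MyTarget::sub_32, DL,
///                                           MVT::i32, Val64);
/// \endcode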
SDValue
SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
                                     SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue
SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
                                    SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
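///
/// A minimal illustrative sketch (hypothetical operands):
/// \code
///   SDValue Ops[] = { LHS, RHS };
///   if (SDNode *E = DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(MVT::i32),
///                                       Ops, nullptr)) {
///     // An equivalent ADD already exists in the DAG; reuse E.
///   }
/// \endcode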
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags *Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    AddNodeIDFlags(ID, Opcode, Flags);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP))
      return E;
  }
  return nullptr;
}

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
                                      unsigned R, bool IsIndirect, uint64_t Off,
                                      DebugLoc DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
                                              const Value *C, uint64_t Off,
                                              DebugLoc DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
                                                unsigned FI, uint64_t Off,
                                                DebugLoc DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
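/// A minimal illustrative sketch (hypothetical nodes):
/// \code
///   // Fold (add X, 0) by redirecting every user of the ADD to X.
///   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), X);
/// \endcode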
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of with self");

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values.  To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
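///
/// A minimal illustrative sketch (hypothetical load node):
/// \code
///   // Redirect only the chain result of a load, leaving the loaded
///   // value (result 0) untouched.
///   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
/// \endcode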
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial, case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {
  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }
}

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order.  It returns the maximum id; as a side
/// effect, the AllNodes list is re-sorted into topological order.
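///
/// A minimal illustrative sketch of typical use:
/// \code
///   unsigned DAGSize = DAG.AssignTopologicalOrder();
///   // Every node's NodeId is now its topological index, so an operand's
///   // id is always smaller than the id of any of its users.
/// \endcode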
  5736. unsigned SelectionDAG::AssignTopologicalOrder() {
  5737. unsigned DAGSize = 0;
  5738. // SortedPos tracks the progress of the algorithm. Nodes before it are
  5739. // sorted, nodes after it are unsorted. When the algorithm completes
  5740. // it is at the end of the list.
  5741. allnodes_iterator SortedPos = allnodes_begin();
  5742. // Visit all the nodes. Move nodes with no operands to the front of
  5743. // the list immediately. Annotate nodes that do have operands with their
  5744. // operand count. Before we do this, the Node Id fields of the nodes
  5745. // may contain arbitrary values. After, the Node Id fields for nodes
  5746. // before SortedPos will contain the topological sort index, and the
  5747. // Node Id fields for nodes At SortedPos and after will contain the
  5748. // count of outstanding operands.
  5749. for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
  5750. SDNode *N = I++;
  5751. checkForCycles(N, this);
  5752. unsigned Degree = N->getNumOperands();
  5753. if (Degree == 0) {
  5754. // A node with no uses, add it to the result array immediately.
  5755. N->setNodeId(DAGSize++);
  5756. allnodes_iterator Q = N;
  5757. if (Q != SortedPos)
  5758. SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
  5759. assert(SortedPos != AllNodes.end() && "Overran node list");
  5760. ++SortedPos;
  5761. } else {
  5762. // Temporarily use the Node Id as scratch space for the degree count.
  5763. N->setNodeId(Degree);
  5764. }
  5765. }
  5766. // Visit all the nodes. As we iterate, move nodes into sorted order,
  5767. // such that by the time the end is reached all nodes will be sorted.
  5768. for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
  5769. SDNode *N = I;
  5770. checkForCycles(N, this);
  5771. // N is in sorted position, so all its uses have one less operand
  5772. // that needs to be sorted.
  5773. for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
  5774. UI != UE; ++UI) {
  5775. SDNode *P = *UI;
  5776. unsigned Degree = P->getNodeId();
  5777. assert(Degree != 0 && "Invalid node degree");
  5778. --Degree;
  5779. if (Degree == 0) {
  5780. // All of P's operands are sorted, so P may sorted now.
  5781. P->setNodeId(DAGSize++);
  5782. if (P != SortedPos)
  5783. SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
  5784. assert(SortedPos != AllNodes.end() && "Overran node list");
  5785. ++SortedPos;
  5786. } else {
  5787. // Update P's outstanding operand count.
  5788. P->setNodeId(Degree);
  5789. }
  5790. }
  5791. if (I == SortedPos) {
  5792. #ifndef NDEBUG
  5793. SDNode *S = ++I;
  5794. dbgs() << "Overran sorted position:\n";
  5795. S->dumprFull(this); dbgs() << "\n";
  5796. dbgs() << "Checking if this is due to cycles\n";
  5797. checkForCycles(this, true);
  5798. #endif
  5799. llvm_unreachable(nullptr);
  5800. }
  5801. }
  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}
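
// Example (illustrative sketch, not part of the upstream file; assumes a
// SelectionDAG &DAG in scope): after AssignTopologicalOrder, a node's
// NodeId is its topological index, so every operand precedes its users.
//
//   unsigned NumNodes = DAG.AssignTopologicalOrder();
//   (void)NumNodes;
//   for (SDNode &Node : DAG.allnodes())
//     for (const SDValue &Op : Node.op_values())
//       assert(Op.getNode()->getNodeId() < Node.getNodeId() &&
//              "operands must precede their users after sorting");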

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

/// TransferDbgValues - Transfer SDDbgValues from one SDValue to another,
/// cloning each SDNODE-kind debug value so that it refers to the new node.
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
  if (From == To || !From.getNode()->getHasDebugValue())
    return;

  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
       I != E; ++I) {
    SDDbgValue *Dbg = *I;
    if (Dbg->getKind() == SDDbgValue::SDNODE) {
      SDDbgValue *Clone =
          getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
                      To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
                      Dbg->getDebugLoc(), Dbg->getOrder());
      ClonedDVs.push_back(Clone);
    }
  }
  for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
       E = ClonedDVs.end(); I != E; ++I)
    AddDbgValue(*I, ToNode, false);
}
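
// Example (illustrative; Old and New are hypothetical SDValues): when a
// combine replaces Old with New, attached debug values should follow the
// replacement:
//
//   DAG.TransferDbgValues(Old, New);
//   DAG.ReplaceAllUsesWith(Old.getNode(), New.getNode());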

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         DebugLoc DL, const GlobalValue *GA,
                                         EVT VT, int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
                                         SDValue X, unsigned SrcAS,
                                         unsigned DestAS)
    : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(isNonTemporal() == MMO->isNonTemporal() &&
         "Non-temporal encoding error!");
  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs, Ops),
      MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
}

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}
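
// Example (illustrative; N is a hypothetical multi-result node such as a
// load, whose result 0 is the value and result 1 the chain):
//
//   if (N->hasNUsesOfValue(1, 0))
//     ... // exactly one user of the loaded value; chain uses are ignored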

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (this == Op.getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply; we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Non-volatile loads have no side effects; look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}
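
// Example (illustrative; StoreChain and LoadChain are hypothetical chain
// values): before reordering two memory operations, a combine can verify
// that no side-effecting node separates their chains:
//
//   if (StoreChain.reachesChainWithoutSideEffects(LoadChain))
//     ... // only TokenFactors and non-volatile loads in between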

/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return hasPredecessorHelper(N, Visited, Worklist);
}

bool
SDNode::hasPredecessorHelper(const SDNode *N,
                             SmallPtrSetImpl<const SDNode *> &Visited,
                             SmallVectorImpl<const SDNode *> &Worklist) const {
  if (Visited.empty()) {
    Worklist.push_back(this);
  } else {
    // Take a look in the visited set. If we've already encountered this node
    // we needn't search further.
    if (Visited.count(N))
      return true;
  }

  // Haven't visited N yet. Continue the search.
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    for (const SDValue &OpV : M->op_values()) {
      SDNode *Op = OpV.getNode();
      if (Visited.insert(Op).second)
        Worklist.push_back(Op);
      if (Op == N)
        return true;
    }
  }

  return false;
}
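
// Example (illustrative; User, A, and B are hypothetical nodes): the helper
// lets a caller amortize one visited set and worklist over several
// predecessor queries against the same node, since a non-empty Visited set
// is consulted before any further traversal:
//
//   SmallPtrSet<const SDNode *, 32> Visited;
//   SmallVector<const SDNode *, 16> Worklist;
//   bool AnyPred = User->hasPredecessorHelper(A, Visited, Worklist) ||
//                  User->hasPredecessorHelper(B, Visited, Worklist);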

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
      break;
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  return getNode(ISD::BUILD_VECTOR, dl,
                 EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
}
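
// Example (illustrative; N is a hypothetical node): unrolling a
// <4 x i32> add yields four scalar adds rebuilt into a BUILD_VECTOR.
//
//   // N = (add <4 x i32> %a, %b)
//   SDValue Unrolled = DAG.UnrollVectorOp(N);
//   // Unrolled = BUILD_VECTOR(a0+b0, a1+b1, a2+b2, a3+b3)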

/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                     unsigned Bytes, int Dist) const {
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X + C.
  if (isBaseWithConstantOffset(Loc)) {
    int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
    if (Loc.getOperand(0) == BaseLoc) {
      // If the base location is a simple address with no offset itself, then
      // the second load's first add operand should be the base address.
      if (LocOffset == Dist * (int)Bytes)
        return true;
    } else if (isBaseWithConstantOffset(BaseLoc)) {
      // The base location itself has an offset, so subtract that value from
      // the second load's offset before comparing to distance * size.
      int64_t BOffset =
          cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
      if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
        if ((LocOffset - BOffset) == Dist * (int)Bytes)
          return true;
      }
    }
  }
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
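
// Example (illustrative; LD0 and LD1 are hypothetical LoadSDNodes on the
// same chain): two 4-byte loads from offsets 0 and 4 of the same base are
// consecutive at distance 1:
//
//   bool Adjacent = DAG.isConsecutiveLoad(LD1, LD0, /*Bytes=*/4, /*Dist=*/1);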

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
                           getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}
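
// Example (illustrative; Ptr is a hypothetical address value): callers use
// the inferred alignment when building memory operands, falling back when
// nothing can be proven:
//
//   unsigned Align = DAG.InferPtrAlignment(Ptr);
//   if (Align == 0)
//     ... // unknown; fall back to the memory type's ABI alignment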

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}
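
// Example (illustrative; Vec is a hypothetical <8 x i32> SDValue): the two
// routines above compose to split a vector into its natural halves:
//
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Vec.getValueType());
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(Vec, SDLoc(Vec), LoVT, HiVT);
//   // Lo and Hi are each <4 x i32> EXTRACT_SUBVECTORs of Vec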

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |=
          CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue  = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef  = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}
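
// Example (illustrative; BV is a hypothetical BuildVectorSDNode for a
// <4 x i8> vector whose elements are all 0x55): halving the 32 collected
// bits keeps matching until the 8-bit element, so SplatBitSize becomes 8:
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs, /*MinSplatBits=*/8))
//     ... // SplatBitSize == 8 and SplatValue.getZExtValue() == 0x55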

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).getOpcode() == ISD::UNDEF &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}
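
// Example (illustrative; BV is a hypothetical BuildVectorSDNode): the
// convenience getters above combine splat detection with a constant check:
//
//   BitVector UndefElements;
//   if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElements))
//     ... // all defined elements equal C; UndefElements flags the rest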

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef XDEBUG
  check = true;
#endif  // XDEBUG
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif  // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}